author     Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:19:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:20:17 +0000
commit     a64a253794ac64cb40befee54db53bde17dd0d49 (patch)
tree       c1024acc5f6e508814b944d99f112259bb28b1be /python.d
parent     New upstream version 1.10.0+dfsg (diff)
download   netdata-a64a253794ac64cb40befee54db53bde17dd0d49.tar.xz
           netdata-a64a253794ac64cb40befee54db53bde17dd0d49.zip
New upstream version 1.11.0+dfsg (upstream/1.11.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'python.d')
-rw-r--r--  python.d/Makefile.am | 204
-rw-r--r--  python.d/Makefile.in | 1107
-rw-r--r--  python.d/README.md | 2363
-rw-r--r--  python.d/apache.chart.py | 129
-rw-r--r--  python.d/beanstalk.chart.py | 250
-rw-r--r--  python.d/bind_rndc.chart.py | 243
-rw-r--r--  python.d/ceph.chart.py | 313
-rw-r--r--  python.d/chrony.chart.py | 105
-rw-r--r--  python.d/couchdb.chart.py | 410
-rw-r--r--  python.d/cpufreq.chart.py | 113
-rw-r--r--  python.d/cpuidle.chart.py | 148
-rw-r--r--  python.d/dns_query_time.chart.py | 134
-rw-r--r--  python.d/dnsdist.chart.py | 101
-rw-r--r--  python.d/dovecot.chart.py | 126
-rw-r--r--  python.d/elasticsearch.chart.py | 554
-rw-r--r--  python.d/example.chart.py | 47
-rw-r--r--  python.d/exim.chart.py | 39
-rw-r--r--  python.d/fail2ban.chart.py | 213
-rw-r--r--  python.d/freeradius.chart.py | 125
-rw-r--r--  python.d/go_expvar.chart.py | 237
-rw-r--r--  python.d/haproxy.chart.py | 339
-rw-r--r--  python.d/hddtemp.chart.py | 110
-rw-r--r--  python.d/httpcheck.chart.py | 117
-rw-r--r--  python.d/icecast.chart.py | 92
-rw-r--r--  python.d/ipfs.chart.py | 124
-rw-r--r--  python.d/isc_dhcpd.chart.py | 193
-rw-r--r--  python.d/mdstat.chart.py | 189
-rw-r--r--  python.d/memcached.chart.py | 183
-rw-r--r--  python.d/mongodb.chart.py | 673
-rw-r--r--  python.d/mysql.chart.py | 491
-rw-r--r--  python.d/nginx.chart.py | 75
-rw-r--r--  python.d/nginx_plus.chart.py | 491
-rw-r--r--  python.d/nsd.chart.py | 93
-rw-r--r--  python.d/ntpd.chart.py | 380
-rw-r--r--  python.d/ovpn_status_log.chart.py | 118
-rw-r--r--  python.d/phpfpm.chart.py | 169
-rw-r--r--  python.d/portcheck.chart.py | 159
-rw-r--r--  python.d/postfix.chart.py | 50
-rw-r--r--  python.d/postgres.chart.py | 666
-rw-r--r--  python.d/powerdns.chart.py | 58
-rw-r--r--  python.d/python-modules-installer.sh | 158
-rw-r--r--  python.d/python_modules/__init__.py | 1
-rw-r--r--  python.d/python_modules/base.py | 9
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/ExecutableService.py | 88
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/LogService.py | 78
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/MySQLService.py | 158
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/SimpleService.py | 262
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/SocketService.py | 261
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/UrlService.py | 126
-rw-r--r--  python.d/python_modules/bases/FrameworkServices/__init__.py | 0
-rw-r--r--  python.d/python_modules/bases/__init__.py | 0
-rw-r--r--  python.d/python_modules/bases/charts.py | 382
-rw-r--r--  python.d/python_modules/bases/collection.py | 144
-rw-r--r--  python.d/python_modules/bases/loaders.py | 66
-rw-r--r--  python.d/python_modules/bases/loggers.py | 205
-rw-r--r--  python.d/python_modules/pyyaml2/__init__.py | 315
-rw-r--r--  python.d/python_modules/pyyaml2/composer.py | 139
-rw-r--r--  python.d/python_modules/pyyaml2/constructor.py | 675
-rw-r--r--  python.d/python_modules/pyyaml2/cyaml.py | 85
-rw-r--r--  python.d/python_modules/pyyaml2/dumper.py | 62
-rw-r--r--  python.d/python_modules/pyyaml2/emitter.py | 1140
-rw-r--r--  python.d/python_modules/pyyaml2/error.py | 75
-rw-r--r--  python.d/python_modules/pyyaml2/events.py | 86
-rw-r--r--  python.d/python_modules/pyyaml2/loader.py | 40
-rw-r--r--  python.d/python_modules/pyyaml2/nodes.py | 49
-rw-r--r--  python.d/python_modules/pyyaml2/parser.py | 589
-rw-r--r--  python.d/python_modules/pyyaml2/reader.py | 190
-rw-r--r--  python.d/python_modules/pyyaml2/representer.py | 484
-rw-r--r--  python.d/python_modules/pyyaml2/resolver.py | 224
-rw-r--r--  python.d/python_modules/pyyaml2/scanner.py | 1457
-rw-r--r--  python.d/python_modules/pyyaml2/serializer.py | 111
-rw-r--r--  python.d/python_modules/pyyaml2/tokens.py | 104
-rw-r--r--  python.d/python_modules/pyyaml3/__init__.py | 312
-rw-r--r--  python.d/python_modules/pyyaml3/composer.py | 139
-rw-r--r--  python.d/python_modules/pyyaml3/constructor.py | 686
-rw-r--r--  python.d/python_modules/pyyaml3/cyaml.py | 85
-rw-r--r--  python.d/python_modules/pyyaml3/dumper.py | 62
-rw-r--r--  python.d/python_modules/pyyaml3/emitter.py | 1137
-rw-r--r--  python.d/python_modules/pyyaml3/error.py | 75
-rw-r--r--  python.d/python_modules/pyyaml3/events.py | 86
-rw-r--r--  python.d/python_modules/pyyaml3/loader.py | 40
-rw-r--r--  python.d/python_modules/pyyaml3/nodes.py | 49
-rw-r--r--  python.d/python_modules/pyyaml3/parser.py | 589
-rw-r--r--  python.d/python_modules/pyyaml3/reader.py | 192
-rw-r--r--  python.d/python_modules/pyyaml3/representer.py | 374
-rw-r--r--  python.d/python_modules/pyyaml3/resolver.py | 224
-rw-r--r--  python.d/python_modules/pyyaml3/scanner.py | 1448
-rw-r--r--  python.d/python_modules/pyyaml3/serializer.py | 111
-rw-r--r--  python.d/python_modules/pyyaml3/tokens.py | 104
-rw-r--r--  python.d/python_modules/third_party/__init__.py | 0
-rw-r--r--  python.d/python_modules/third_party/lm_sensors.py | 257
-rw-r--r--  python.d/python_modules/third_party/ordereddict.py | 128
-rw-r--r--  python.d/python_modules/urllib3/__init__.py | 97
-rw-r--r--  python.d/python_modules/urllib3/_collections.py | 314
-rw-r--r--  python.d/python_modules/urllib3/connection.py | 373
-rw-r--r--  python.d/python_modules/urllib3/connectionpool.py | 899
-rw-r--r--  python.d/python_modules/urllib3/contrib/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/bindings.py | 590
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/low_level.py | 343
-rw-r--r--  python.d/python_modules/urllib3/contrib/appengine.py | 296
-rw-r--r--  python.d/python_modules/urllib3/contrib/ntlmpool.py | 112
-rw-r--r--  python.d/python_modules/urllib3/contrib/pyopenssl.py | 457
-rw-r--r--  python.d/python_modules/urllib3/contrib/securetransport.py | 807
-rw-r--r--  python.d/python_modules/urllib3/contrib/socks.py | 188
-rw-r--r--  python.d/python_modules/urllib3/exceptions.py | 246
-rw-r--r--  python.d/python_modules/urllib3/fields.py | 178
-rw-r--r--  python.d/python_modules/urllib3/filepost.py | 94
-rw-r--r--  python.d/python_modules/urllib3/packages/__init__.py | 5
-rw-r--r--  python.d/python_modules/urllib3/packages/backports/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/packages/backports/makefile.py | 53
-rw-r--r--  python.d/python_modules/urllib3/packages/ordered_dict.py | 259
-rw-r--r--  python.d/python_modules/urllib3/packages/six.py | 868
-rw-r--r--  python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py | 19
-rw-r--r--  python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py | 157
-rw-r--r--  python.d/python_modules/urllib3/poolmanager.py | 440
-rw-r--r--  python.d/python_modules/urllib3/request.py | 148
-rw-r--r--  python.d/python_modules/urllib3/response.py | 622
-rw-r--r--  python.d/python_modules/urllib3/util/__init__.py | 54
-rw-r--r--  python.d/python_modules/urllib3/util/connection.py | 130
-rw-r--r--  python.d/python_modules/urllib3/util/request.py | 118
-rw-r--r--  python.d/python_modules/urllib3/util/response.py | 81
-rw-r--r--  python.d/python_modules/urllib3/util/retry.py | 401
-rw-r--r--  python.d/python_modules/urllib3/util/selectors.py | 581
-rw-r--r--  python.d/python_modules/urllib3/util/ssl_.py | 337
-rw-r--r--  python.d/python_modules/urllib3/util/timeout.py | 242
-rw-r--r--  python.d/python_modules/urllib3/util/url.py | 230
-rw-r--r--  python.d/python_modules/urllib3/util/wait.py | 40
-rw-r--r--  python.d/rabbitmq.chart.py | 193
-rw-r--r--  python.d/redis.chart.py | 200
-rw-r--r--  python.d/retroshare.chart.py | 76
-rw-r--r--  python.d/samba.chart.py | 126
-rw-r--r--  python.d/sensors.chart.py | 139
-rw-r--r--  python.d/smartd_log.chart.py | 346
-rw-r--r--  python.d/springboot.chart.py | 151
-rw-r--r--  python.d/squid.chart.py | 120
-rw-r--r--  python.d/tomcat.chart.py | 153
-rw-r--r--  python.d/traefik.chart.py | 181
-rw-r--r--  python.d/varnish.chart.py | 241
-rw-r--r--  python.d/web_log.chart.py | 1047
140 files changed, 0 insertions, 38344 deletions
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
deleted file mode 100644
index a5fcc7394..000000000
--- a/python.d/Makefile.am
+++ /dev/null
@@ -1,204 +0,0 @@
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-CLEANFILES = \
- python-modules-installer.sh \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-
-SUFFIXES = .in
-
-dist_python_SCRIPTS = \
- python-modules-installer.sh \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- mdstat.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- varnish.chart.py \
- web_log.chart.py \
- $(NULL)
-
-pythonmodulesdir=$(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- python_modules/base.py \
- $(NULL)
-
-basesdir=$(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir=$(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir=$(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- $(NULL)
-
-pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir=$(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir=$(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir=$(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir=$(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
deleted file mode 100644
index d6e11d0cf..000000000
--- a/python.d/Makefile.in
+++ /dev/null
@@ -1,1107 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
- $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_python_DATA) $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
- $(dist_third_party_DATA)
-subdir = python.d
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
- $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
- $(top_srcdir)/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
- $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
- "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)"
-SCRIPTS = $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_python_DATA) $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
- $(dist_third_party_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python-modules-installer.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_python_SCRIPTS = \
- python-modules-installer.sh \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- mdstat.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- varnish.chart.py \
- web_log.chart.py \
- $(NULL)
-
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- python_modules/base.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python.d/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu python.d/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dist_third_partyDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dist_third_partyDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/python.d/README.md b/python.d/README.md
deleted file mode 100644
index faabba2c7..000000000
--- a/python.d/README.md
+++ /dev/null
@@ -1,2363 +0,0 @@
-# Disclaimer
-
-Every module should be compatible with python2 and python3.
-All third party libraries should be installed system-wide or in `python_modules` directory.
-Module configurations are written in YAML and **pyYAML is required**.
-
-Every configuration file must have one of two formats:
-
-- Configuration for only one job:
-
-```yaml
-update_every : 2 # update frequency
-retries : 1 # how many failures in update() is tolerated
-priority : 20000 # where it is shown on dashboard
-
-other_var1 : bla # variables passed to module
-other_var2 : alb
-```
-
-- Configuration for many jobs (ex. mysql):
-
-```yaml
-# module defaults:
-update_every : 2
-retries : 1
-priority : 20000
-
-local: # job name
- update_every : 5 # job update frequency
- other_var1 : some_val # module specific variable
-
-other_job:
- priority : 5 # job position on dashboard
- retries : 20 # job retries
- other_var2 : val # module specific variable
-```
-
-`update_every`, `retries`, and `priority` are always optional.
-
----
-
-The following python.d modules are supported:
-
-# apache
-
-This module will monitor one or more apache servers depending on configuration.
-
-**Requirements:**
- * apache with enabled `mod_status`
-
-It produces the following charts:
-
-1. **Requests** in requests/s
- * requests
-
-2. **Connections**
- * connections
-
-3. **Async Connections**
- * keepalive
- * closing
- * writing
-
-4. **Bandwidth** in kilobits/s
- * sent
-
-5. **Workers**
- * idle
- * busy
-
-6. **Lifetime Avg. Requests/s** in requests/s
- * requests_sec
-
-7. **Lifetime Avg. Bandwidth/s** in kilobits/s
- * size_sec
-
-8. **Lifetime Avg. Response Size** in bytes/request
- * size_req
-
-### configuration
-
-Needs only `url` to server's `server-status?auto`
-
-Here is an example for 2 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/server-status?auto'
- retries : 20
-
-remote:
- url : 'http://www.apache.org/server-status?auto'
- update_every : 5
- retries : 4
-```
-
-Without configuration, module attempts to connect to `http://localhost/server-status?auto`
-
----
-
-# apache_cache
-
-Module monitors apache mod_cache log and produces only one chart:
-
-**cached responses** in percent cached
- * hit
- * miss
- * other
-
-### configuration
-
-Sample:
-
-```yaml
-update_every : 10
-priority : 120000
-retries : 5
-log_path : '/var/log/apache2/cache.log'
-```
-
-If no configuration is given, module will attempt to read log file at `/var/log/apache2/cache.log`
-
----
-
-# beanstalk
-
-Module provides server and tube level statistics:
-
-**Requirements:**
- * `python-beanstalkc`
- * `python-yaml`
-
-**Server statistics:**
-
-1. **Cpu usage** in cpu time
- * user
- * system
-
-2. **Jobs rate** in jobs/s
- * total
- * timeouts
-
-3. **Connections rate** in connections/s
- * connections
-
-4. **Commands rate** in commands/s
- * put
- * peek
- * peek-ready
- * peek-delayed
- * peek-buried
- * reserve
- * use
- * watch
- * ignore
- * delete
- * release
- * bury
- * kick
- * stats
- * stats-job
- * stats-tube
- * list-tubes
- * list-tube-used
- * list-tubes-watched
- * pause-tube
-
-5. **Current tubes** in tubes
- * tubes
-
-6. **Current jobs** in jobs
- * urgent
- * ready
- * reserved
- * delayed
- * buried
-
-7. **Current connections** in connections
- * written
- * producers
- * workers
- * waiting
-
-8. **Binlog** in records/s
- * written
- * migrated
-
-9. **Uptime** in seconds
- * uptime
-
-**Per tube statistics:**
-
-1. **Jobs rate** in jobs/s
- * jobs
-
-2. **Jobs** in jobs
- * using
- * ready
- * reserved
- * delayed
- * buried
-
-3. **Connections** in connections
- * using
- * waiting
- * watching
-
-4. **Commands** in commands/s
- * deletes
- * pauses
-
-5. **Pause** in seconds
- * since
- * left
-
-
-### configuration
-
-Sample:
-
-```yaml
-host : '127.0.0.1'
-port : 11300
-```
-
-If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address
-
----
-
-# bind_rndc
-
-Module parses bind dump file to collect real-time performance metrics
-
-**Requirements:**
- * Version of bind must be 9.6+
- * Netdata must have permissions to run `rndc stats`
-
-It produces:
-
-1. **Name server statistics**
- * requests
- * responses
- * success
- * auth_answer
- * nonauth_answer
- * nxrrset
- * failure
- * nxdomain
- * recursion
- * duplicate
- * rejections
-
-2. **Incoming queries**
- * RESERVED0
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * MX
- * TXT
- * X25
- * AAAA
- * SRV
- * NAPTR
- * A6
- * DS
- * RRSIG
- * DNSKEY
- * SPF
- * ANY
- * DLV
-
-3. **Outgoing queries**
- * Same as Incoming queries
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- named_stats_path : '/var/log/bind/named.stats'
-```
-
-If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats`
-
----
-
-# chrony
-
-This module monitors the precision and statistics of a local chronyd server.
-
-It produces:
-
-* frequency
-* last offset
-* RMS offset
-* residual freq
-* root delay
-* root dispersion
-* skew
-* system time
-
-**Requirements:**
-Verify that the netdata user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
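-
-If command access has to be opened up, the relevant `chrony.conf` directive might look like this (illustrative; adjust the allowed address to your setup):
-
-```
-cmdallow 127.0.0.1
-```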
-
-### Configuration
-
-Sample:
-```yaml
-# data collection frequency:
-update_every: 1
-
-# chrony query command:
-local:
- command: 'chronyc -n tracking'
-```
-
----
-
-# ceph
-
-This module monitors the ceph cluster usage and consumption data of a server.
-
-It produces:
-
-* Cluster statistics (usage, available, latency, objects, read/write rate)
-* OSD usage
-* OSD latency
-* Pool usage
-* Pool read/write operations
-* Pool read/write rate
-* number of objects per pool
-
-**Requirements:**
-
-- `rados` python module
-- Read permission on the keyring file granted to the `ceph` group:
-```shell
-# chmod 640 /etc/ceph/ceph.client.admin.keyring
-```
-
-### Configuration
-
-Sample:
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-```
-
----
-
-# couchdb
-
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
-
-* Overall server reads/writes
-* HTTP traffic breakdown
- * Request methods (`GET`, `PUT`, `POST`, etc.)
- * Response status codes (`200`, `201`, `4xx`, etc.)
-* Active server tasks
-* Replication status (CouchDB 2.1 and up only)
-* Erlang VM stats
-* Optional per-database statistics: sizes, # of docs, # of deleted docs
-
-### Configuration
-
-Sample for a local server running on port 5984:
-```yaml
-local:
- user: 'admin'
- pass: 'password'
- node: 'couchdb@127.0.0.1'
-```
-
-Be sure to specify a correct admin-level username and password.
-
-You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
-
-If you want per-database statistics, these need to be added to the configuration, separated by spaces:
-```yaml
-local:
- ...
- databases: 'db1 db2 db3 ...'
-```
-
----
-
-# cpufreq
-
-This module shows the current CPU frequency as set by the cpufreq kernel
-module.
-
-**Requirement:**
-You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
-enabled in your kernel.
-
-This module tries to read from one of two possible locations. On
-initialization, it tries to read the `time_in_state` files provided by
-cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
-falls back to using the more inaccurate `scaling_cur_freq` file (which only
-represents the **current** CPU frequency, and doesn't account for any state
-changes which happen between updates).
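-
-For reference, these are the sysfs files the module reads, shown here for cpu0 only (illustrative commands):
-
-```shell
-cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
-cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
-```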
-
-It produces one chart with multiple lines (one line per core).
-
-### configuration
-
-Sample:
-
-```yaml
-sys_dir: "/sys/devices"
-```
-
-If no configuration is given, module will search for cpufreq files in `/sys/devices` directory.
-Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified.
-
----
-
-# cpuidle
-
-This module monitors the usage of CPU idle states.
-
-**Requirement:**
-Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
-
-It produces one stacked chart per CPU, showing the percentage of time spent in
-each state.
-
----
-# dns_query_time
-
-This module provides dns query time statistics.
-
-**Requirement:**
-* `python-dnspython` package
-
-It produces one aggregate chart or one chart per dns server, showing the query time.
-
----
-
-# dnsdist
-
-Module monitors dnsdist performance and health metrics.
-
-Following charts are drawn:
-
-1. **Response latency**
- * latency-slow
- * latency100-1000
- * latency50-100
- * latency10-50
- * latency1-10
- * latency0-1
-
-2. **Cache performance**
- * cache-hits
- * cache-misses
-
-3. **ACL events**
- * acl-drops
- * rule-drop
- * rule-nxdomain
- * rule-refused
-
-4. **Noncompliant data**
- * empty-queries
- * no-policy
- * noncompliant-queries
- * noncompliant-responses
-
-5. **Queries**
- * queries
- * rdqueries
-
-6. **Health**
- * downstream-send-errors
- * downstream-timeouts
- * servfail-responses
- * trunc-failures
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:5053/jsonstat?command=stats'
- user : 'username'
- pass : 'password'
- header:
- X-API-Key: 'dnsdist-api-key'
-```
-
-# dovecot
-
-This module provides statistics information from the dovecot server.
-Statistics are taken from the dovecot socket by executing the `EXPORT global` command.
-More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics)
-
-**Requirement:**
-Dovecot unix socket with R/W permissions for user netdata or dovecot with configured TCP/IP socket.
-
-Module gives information with following charts:
-
-1. **sessions**
- * active sessions
-
-2. **logins**
- * logins
-
-3. **commands** - number of IMAP commands
- * commands
-
-4. **Faults**
- * minor
- * major
-
-5. **Context Switches**
- * voluntary
- * involuntary
-
-6. **disk** in bytes/s
- * read
- * write
-
-7. **bytes** in bytes/s
- * read
- * write
-
-8. **number of syscalls** in syscalls/s
- * read
- * write
-
-9. **lookups** - number of lookups per second
- * path
- * attr
-
-10. **hits** - number of cache hits
- * hits
-
-11. **attempts** - authorization attempts
- * success
- * failure
-
-12. **cache** - cached authorization hits
- * hit
- * miss
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-
-localsocket:
- name : 'local'
- socket : '/var/run/dovecot/stats'
-```
-
-If no configuration is given, module will attempt to connect to dovecot using the unix socket located at `/var/run/dovecot/stats`
-
----
-
-# elasticsearch
-
-Module monitors elasticsearch performance and health metrics.
-
-It produces:
-
-1. **Search performance** charts:
- * Number of queries, fetches
- * Time spent on queries, fetches
- * Query and fetch latency
-
-2. **Indexing performance** charts:
- * Number of documents indexed, index refreshes, flushes
- * Time spent on indexing, refreshing, flushing
- * Indexing and flushing latency
-
-3. **Memory usage and garbage collection** charts:
- * JVM heap currently in use, committed
- * Count of garbage collections
- * Time spent on garbage collections
-
-4. **Host metrics** charts:
- * Available file descriptors in percent
- * Opened HTTP connections
- * Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
- * Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
- * Fielddata cache size
- * Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
- * Cluster status
- * Nodes and tasks statistics
- * Shards statistics
-
-8. **Cluster stats API** charts:
- * Nodes statistics
- * Query cache statistics
- * Docs statistics
- * Store statistics
- * Indices and shards statistics
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'ipaddress' # Server ip address or hostname
- port : 9200 # Port on which elasticsearch listens
- cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
- cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
-```
-
-If no configuration is given, module will fail to run.
-
----
-
-# exim
-
-Simple module executing `exim -bpc` to grab the exim queue length.
-This command can take a long time to complete, so it is not recommended to run it every second.
-
-It produces only one chart:
-
-1. **Exim Queue Emails**
- * emails
-
-Configuration is not needed.
-
----
-
-# fail2ban
-
-Module monitors the fail2ban log file to show all bans for all active jails.
-
-**Requirements:**
- * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban entry in logrotate.d; see the sketch below)
-
-It produces one chart with multiple lines (one line per jail)
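-
-A hypothetical logrotate drop-in implementing that hint might look like this (path and rotation policy are illustrative; adjust them to your distribution):
-
-```
-/var/log/fail2ban.log {
-    weekly
-    rotate 4
-    compress
-    missingok
-    create 0640 root netdata
-}
-```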
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
- exclude: 'dropbear apache'
-```
-If no configuration is given, module will attempt to read log file at `/var/log/fail2ban.log` and conf file at `/etc/fail2ban/jail.local`.
-If the conf file is not found, the default jail is `ssh`.
-
----
-
-# freeradius
-
-Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
-
-It produces:
-
-1. **Authentication counters:**
- * access-accepts
- * access-rejects
- * auth-dropped-requests
- * auth-duplicate-requests
- * auth-invalid-requests
- * auth-malformed-requests
- * auth-unknown-types
-
-2. **Accounting counters:** [optional]
- * accounting-requests
- * accounting-responses
- * acct-dropped-requests
- * acct-duplicate-requests
- * acct-invalid-requests
- * acct-malformed-requests
- * acct-unknown-types
-
-3. **Proxy authentication counters:** [optional]
- * proxy-access-accepts
- * proxy-access-rejects
- * proxy-auth-dropped-requests
- * proxy-auth-duplicate-requests
- * proxy-auth-invalid-requests
- * proxy-auth-malformed-requests
- * proxy-auth-unknown-types
-
-4. **Proxy accounting counters:** [optional]
- * proxy-accounting-requests
- * proxy-accounting-responses
- * proxy-acct-dropped-requests
- * proxy-acct-duplicate-requests
- * proxy-acct-invalid-requests
- * proxy-acct-malformed-requests
- * proxy-acct-unknown-types
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'localhost'
- port : '18121'
- secret : 'adminsecret'
- acct : False # Freeradius accounting statistics.
- proxy_auth : False # Freeradius proxy authentication statistics.
- proxy_acct : False # Freeradius proxy accounting statistics.
-```
-
-**Freeradius server configuration:**
-
-The configuration for the status server is automatically created in the sites-available directory.
-By default, the status server is enabled and can be queried by every client.
-FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
-
-To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
- * cd sites-enabled
- * ln -s ../sites-available/status status
-
-and restart/reload your FreeRADIUS server.
-
----
-
-# go_expvar
-
-The `go_expvar` module can monitor any Go application that exposes its metrics with the use of the `expvar` package from the Go standard library.
-
-`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications) for more info.
-
-For the memory statistics, it produces the following charts:
-
-1. **Heap allocations** in kB
- * alloc: size of objects allocated on the heap
- * inuse: size of allocated heap spans
-
-2. **Stack allocations** in kB
- * inuse: size of allocated stack spans
-
-3. **MSpan allocations** in kB
- * inuse: size of allocated mspan structures
-
-4. **MCache allocations** in kB
- * inuse: size of allocated mcache structures
-
-5. **Virtual memory** in kB
- * sys: size of reserved virtual address space
-
-6. **Live objects**
- * live: number of live objects in memory
-
-7. **GC pauses average** in ns
- * avg: average duration of all GC stop-the-world pauses
-
-### configuration
-
-Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration.
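-
-As a minimal sketch (assuming the monitored application serves the standard `expvar` endpoint at `/debug/vars` and only the built-in memory charts are wanted), a job could look like:
-
-```yaml
-app1:
- name : 'app1'
- url : 'http://localhost:8080/debug/vars'
-```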
-
----
-
-# haproxy
-
-Module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and current queued sessions,
-as well as health metrics such as backend server status (server check should be used).
-
-Plugin can obtain data from url **OR** unix socket.
-
-**Requirement:**
-Socket MUST be readable AND writable by the netdata user.
-
-It produces:
-
-1. **Frontend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-2. **Backend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-3. **Health** chart
- * number of failed servers for every backend (in DOWN state)
-
-
-### configuration
-
-Sample:
-
-```yaml
-via_url:
- user : 'username' # ONLY IF stats auth is used
- pass : 'password' # ONLY IF stats auth is used
- url : 'http://ip.address:port/url;csv;norefresh'
-```
-
-OR
-
-```yaml
-via_socket:
- socket : 'path/to/haproxy/sock'
-```
-
-If no configuration is given, module will fail to run.
-
----
-
-# hddtemp
-
-Module monitors disk temperatures from one or more hddtemp daemons.
-
-**Requirement:**
-Running `hddtemp` in daemon mode, listening on a TCP port.
-
-It produces one chart **Temperature** with dynamic number of dimensions (one per disk)
-
-### configuration
-
-Sample:
-
-```yaml
-update_every: 3
-host: "127.0.0.1"
-port: 7634
-```
-
-If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address
-
----
-
-# httpcheck
-
-Module monitors remote http server for availability and response time.
-
-Following charts are drawn per job:
-
-1. **Response time** ms
- * Time in 0.1 ms resolution in which the server responds.
- If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Unexpected content: No Regex match found in the response
- * Unexpected status code: Do we get 500 errors?
- * Connection failed: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-### configuration
-
-Sample configuration and default values:
-
-```yaml
-server:
- url: 'http://host:port/path' # required
- status_accepted: # optional
- - 200
- timeout: 1 # optional, supports decimals (e.g. 0.2)
- update_every: 3 # optional
- regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
- redirect: yes # optional
-```
-
-### notes
-
- * The status chart is primarily intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * This plugin is meant for simple use cases. Currently, the accuracy of the
- response time is low and should be used as reference only.
-
----
-
-# icecast
-
-This module will monitor the number of listeners for active sources.
-
-**Requirements:**
- * icecast version >= 2.4.0
-
-It produces the following charts:
-
-1. **Listeners** in listeners
- * source number
-
-### configuration
-
-Needs only `url` to server's `/status-json.xsl`
-
-Here is an example for remote server:
-
-```yaml
-remote:
- url : 'http://1.2.3.4:8443/status-json.xsl'
-```
-
-Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl`
-
----
-
-# IPFS
-
-Module monitors [IPFS](https://ipfs.io) basic information.
-
-1. **Bandwidth** in kbits/s
- * in
- * out
-
-2. **Peers**
- * peers
-
-### configuration
-
-Only the url to the IPFS server is needed.
-
-Sample:
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:5001'
-```
-
----
-
-# isc_dhcpd
-
-Module monitors the leases database to show all active leases for the given pools.
-
-**Requirements:**
- * dhcpd leases file MUST BE readable by netdata
- * pools MUST BE in CIDR format
-
-It produces:
-
-1. **Pools utilization** Aggregate chart for all pools.
- * utilization in percent
-
-2. **Total leases**
- * leases (overall number of leases for all pools)
-
-3. **Active leases** for every pool
- * leases (number of active leases in pool)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- leases_path : '/var/lib/dhcp/dhcpd.leases'
- pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
-```
-
-With python2 you need to install `py2-ipaddress` to make the plugin work.
-The module will not work if no configuration is given.
-
----
-
-
-# mdstat
-
-Module monitors /proc/mdstat
-
-It produces:
-
-1. **Health** Number of failed disks in every array (aggregate chart).
-
-2. **Disks stats**
- * total (number of devices the array ideally would have)
- * inuse (number of devices currently in use)
-
-3. **Current status**
- * resync in percent
- * recovery in percent
- * reshape in percent
- * check in percent
-
-4. **Operation status** (if resync/recovery/reshape/check is active)
- * finish in minutes
- * speed in megabytes/s
-
-### configuration
-No configuration is needed.
-
----
-
-# memcached
-
-Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
-
-1. **Network** in kilobytes/s
- * read
- * written
-
-2. **Connections** per second
- * current
- * rejected
- * total
-
-3. **Items** in cluster
- * current
- * total
-
-4. **Evicted and Reclaimed** items
- * evicted
- * reclaimed
-
-5. **GET** requests/s
- * hits
- * misses
-
-6. **GET rate** in requests/s
- * rate
-
-7. **SET rate** in requests/s
- * rate
-
-8. **DELETE** requests/s
- * hits
- * misses
-
-9. **CAS** requests/s
- * hits
- * misses
- * bad value
-
-10. **Increment** requests/s
- * hits
- * misses
-
-11. **Decrement** requests/s
- * hits
- * misses
-
-12. **Touch** requests/s
- * hits
- * misses
-
-13. **Touch rate** in requests/s
- * rate
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 11211
-```
-
-If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address.
-
----
-
-# mongodb
-
-Module monitors mongodb performance and health metrics.
-
-**Requirements:**
- * `python-pymongo` package.
-
-You need to install it manually.
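-
-For example, it can usually be installed with pip (use the pip that matches the python version the netdata plugin runs with):
-
-```shell
-pip install pymongo
-```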
-
-
-Number of charts depends on mongodb version, storage engine and other features (replication):
-
-1. **Read requests**:
- * query
- * getmore (operation the cursor executes to get additional data from query)
-
-2. **Write requests**:
- * insert
- * delete
- * update
-
-3. **Active clients**:
- * readers (number of clients with read operations in progress or queued)
- * writers (number of clients with write operations in progress or queued)
-
-4. **Journal transactions**:
- * commits (count of transactions that have been written to the journal)
-
-5. **Data written to the journal**:
- * volume (volume of data)
-
-6. **Background flush** (MMAPv1):
- * average ms (average time taken by flushes to execute)
- * last ms (time taken by the last flush)
-
-8. **Read tickets** (WiredTiger):
- * in use (number of read tickets in use)
- * available (number of available read tickets remaining)
-
-9. **Write tickets** (WiredTiger):
- * in use (number of write tickets in use)
- * available (number of available write tickets remaining)
-
-10. **Cursors**:
- * opened (number of cursors currently opened by MongoDB for clients)
- * timedOut (number of cursors that have timed out)
- * noTimeout (number of open cursors with timeout disabled)
-
-11. **Connections**:
- * connected (number of clients currently connected to the database server)
- * unused (number of unused connections available for new clients)
-
-12. **Memory usage metrics**:
- * virtual
- * resident (amount of memory used by the database process)
- * mapped
- * non mapped
-
-13. **Page faults**:
- * page faults (number of times MongoDB had to read data from disk)
-
-14. **Cache metrics** (WiredTiger):
- * percentage of bytes currently in the cache (amount of space taken by cached data)
- * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
-
-15. **Pages evicted from cache** (WiredTiger):
- * modified
- * unmodified
-
-16. **Queued requests**:
- * readers (number of read requests currently queued)
- * writers (number of write requests currently queued)
-
-17. **Errors**:
- * msg (number of message assertions raised)
- * warning (number of warning assertions raised)
- * regular (number of regular assertions raised)
- * user (number of assertions corresponding to errors generated by users)
-
-18. **Storage metrics** (one chart for every database)
- * dataSize (size of all documents + padding in the database)
- * indexSize (size of all indexes in the database)
- * storageSize (size of all extents in the database)
-
-19. **Documents in the database** (one chart for all databases)
- * documents (number of objects in the database among all the collections)
-
-20. **tcmalloc metrics**
- * central cache free
- * current total thread cache
- * pageheap free
- * pageheap unmapped
- * thread cache free
- * transfer cache free
- * heap size
-
-21. **Commands total/failed rate**
- * count
- * createIndex
- * delete
- * eval
- * findAndModify
- * insert
-
-22. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
- * Global lock
- * Database lock
- * Collection lock
- * Metadata lock
- * oplog lock
-
-23. **Replica set members state**
- * state
-
-24. **Oplog window**
- * window (interval of time between the oldest and the latest entries in the oplog)
-
-25. **Replication lag**
- * member (time when last entry from the oplog was applied for every member)
-
-26. **Replication set member heartbeat latency**
- * member (time when last heartbeat was received from replica set member)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- name : 'local'
- host : '127.0.0.1'
- port : 27017
- user : 'netdata'
- pass : 'netdata'
-
-```
-
-If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address
-
----
-
-
-# mysql
-
-Module monitors one or more mysql servers
-
-**Requirements:**
- * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
-
-It will produce following charts (if data is available):
-
-1. **Bandwidth** in kbps
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Operations** in operations/sec
- * opened tables
- * flush
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read random
- * read random next
- * rollback
- * save point
- * update
- * write
-
-4. **Table Locks** in locks/sec
- * immediate
- * waited
-
-5. **Select Issues** in issues/sec
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Sort Issues** in issues/sec
- * merge passes
- * range
- * scan
-
-### configuration
-
-You can provide, per server, the following:
-
-1. username which has access to the database (defaults to 'root')
-2. password (defaults to none)
-3. mysql my.cnf configuration file
-4. mysql socket (optional)
-5. mysql host (ip or hostname)
-6. mysql port (defaults to 3306)
-
-Here is an example for 3 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-retries : 5
-
-local:
- 'my.cnf' : '/etc/mysql/my.cnf'
- priority : 90000
-
-local_2:
- user : 'root'
- pass : 'blablablabla'
- socket : '/var/run/mysqld/mysqld.sock'
- update_every : 1
-
-remote:
- user : 'admin'
- pass : 'bla'
- host : 'example.org'
- port : 9000
- retries : 20
-```
-
-If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root`
-
----
-
-# nginx
-
-This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
-
-**Requirements:**
- * nginx with configured 'ngx_http_stub_status_module'
- * 'location /stub_status'
-
-Example nginx configuration can be found in 'python.d/nginx.conf'
-
-It produces following charts:
-
-1. **Active Connections**
- * active
-
-2. **Requests** in requests/s
- * requests
-
-3. **Active Connections by Status**
- * reading
- * writing
- * waiting
-
-4. **Connections Rate** in connections/s
- * accepts
- * handled
-
-### configuration
-
-Needs only `url` to server's `stub_status`
-
-Here is an example for local server:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/stub_status'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost/stub_status`
-
----
-
-# nginx_plus
-
-This module will monitor one or more nginx_plus servers depending on configuration.
-Servers can be either local or remote.
-
-Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
-
-It produces following charts:
-
-1. **Requests total** in requests/s
- * total
-
-2. **Requests current** in requests
- * current
-
-3. **Connection Statistics** in connections/s
- * accepted
- * dropped
-
-4. **Workers Statistics** in workers
- * idle
- * active
-
-5. **SSL Handshakes** in handshakes/s
- * successful
- * failed
-
-6. **SSL Session Reuses** in sessions/s
- * reused
-
-7. **SSL Memory Usage** in percent
- * usage
-
-8. **Processes** in processes
- * respawned
-
-For every server zone:
-
-1. **Processing** in requests
- * processing
-
-2. **Requests** in requests/s
- * requests
-
-3. **Responses** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Traffic** in kilobits/s
- * received
- * sent
-
-For every upstream:
-
-1. **Peers Requests** in requests/s
- * peer name (dimension per peer)
-
-2. **All Peers Responses** in responses/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-3. **Peer Responses** in requests/s (for every peer)
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Peers Connections** in active
- * peer name (dimension per peer)
-
-5. **Peers Connections Usage** in percent
- * peer name (dimension per peer)
-
-6. **All Peers Traffic** in KB
- * received
- * sent
-
-7. **Peer Traffic** in KB/s (for every peer)
- * received
- * sent
-
-8. **Peer Timings** in ms (for every peer)
- * header
- * response
-
-9. **Memory Usage** in percent
- * usage
-
-10. **Peers Status** in state
- * peer name (dimension per peer)
-
-11. **Peers Total Downtime** in seconds
- * peer name (dimension per peer)
-
-For every cache:
-
-1. **Traffic** in KB
- * served
- * written
- * bypass
-
-2. **Memory Usage** in percent
- * usage
-
-### configuration
-
-Needs only `url` to server's `status`
-
-Here is an example for local server:
-
-```yaml
-local:
- url : 'http://localhost/status'
-```
-
-Without configuration, module fails to start.
-
----
-
-# nsd
-
-Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
-
-**Requirements:**
- * Version of `nsd` must be 4.0+
- * Netdata must have permissions to run `nsd-control stats_noreset`
-
-It produces:
-
-1. **Queries**
- * queries
-
-2. **Zones**
- * master
- * slave
-
-3. **Protocol**
- * udp
- * udp6
- * tcp
- * tcp6
-
-4. **Query Type**
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * HINFO
- * MX
- * NAPTR
- * TXT
- * AAAA
- * SRV
- * ANY
-
-5. **Transfer**
- * NOTIFY
- * AXFR
-
-6. **Return Code**
- * NOERROR
- * FORMERR
- * SERVFAIL
- * NXDOMAIN
- * NOTIMP
- * REFUSED
- * YXDOMAIN
-
-
-Configuration is not needed.
-
----
-
-# ntpd
-
-Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
-
-**Requirements:**
- * Version: `NTPv4`
- * Local interrogation allowed in `/etc/ntp.conf` (default):
-
-```
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-```
-
-It produces:
-
-1. system
- * offset
- * jitter
- * frequency
- * delay
- * dispersion
- * stratum
- * tc
- * precision
-
-2. peers
- * offset
- * delay
- * dispersion
- * jitter
- * rootdelay
- * rootdispersion
- * stratum
- * hmode
- * pmode
- * hpoll
- * ppoll
- * precision
-
-**configuration**
-
-Sample:
-
-```yaml
-update_every: 10
-
-host: 'localhost'
-port: '123'
-show_peers: yes
-# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
-peer_filter: '(127\..*)|(192\.168\..*)'
-# check for new/changed peers every 60 updates
-peer_rescan: 60
-```
-
-Sample (multiple jobs):
-
-Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
-
-```yaml
-local:
- host: 'localhost'
-
-otherhost:
- host: 'otherhost'
-```
-
-If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
-
----
-
-# ovpn_status_log
-
-Module monitors the openvpn-status log file.
-
-**Requirements:**
-
- * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
- so that multiple instances do not overwrite each other's output files.
-
- * Make sure NETDATA USER CAN READ openvpn-status.log
-
- * Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file.
-
-It produces:
-
-1. **Users** OpenVPN active users
- * users
-
-2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
- * in
- * out
-
-### configuration
-
-Sample:
-
-```yaml
-default:
- log_path : '/var/log/openvpn-status.log'
-```
-
----
-
-# phpfpm
-
-This module will monitor one or more php-fpm instances depending on configuration.
-
-**Requirements:**
- * php-fpm with enabled `status` page
- * access to `status` page via web server
-
-It produces following charts:
-
-1. **Active Connections**
- * active
- * maxActive
- * idle
-
-2. **Requests** in requests/s
- * requests
-
-3. **Performance**
- * reached
- * slow
-
-### configuration
-
-Needs only `url` to server's `status`
-
-Here is an example for local instance:
-
-```yaml
-update_every : 3
-priority : 90100
-
-local:
- url : 'http://localhost/status'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost/status`
-
----
-
-# portcheck
-
-Module monitors a remote TCP service.
-
-Following charts are drawn per host:
-
-1. **Latency** ms
- * Time required to connect to a TCP port.
- Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Could not create socket: possible DNS problems
- * Connection refused: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-
-### configuration
-
-```yaml
-server:
- host: 'dns or ip' # required
- port: 22 # required
- timeout: 1 # optional
- update_every: 1 # optional
-```
-
-### notes
-
- * The error chart is intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * Currently, the accuracy of the latency is low and should be used as reference only.
-
----
-
-# postfix
-
-Simple module executing `postqueue -p` to grab the postfix queue.
-
-It produces only two charts:
-
-1. **Postfix Queue Emails**
- * emails
-
-2. **Postfix Queue Emails Size** in KB
- * size
-
-Configuration is not needed.
-
----
-
-# postgres
-
-Module monitors one or more postgres servers.
-
-**Requirements:**
-
- * `python-psycopg2` package. You have to install it manually.
-
-Following charts are drawn:
-
-1. **Database size** MB
- * size
-
-2. **Current Backend Processes** processes
- * active
-
-3. **Write-Ahead Logging Statistics** files/s
- * total
- * ready
- * done
-
-4. **Checkpoints** writes/s
- * scheduled
- * requested
-
-5. **Current connections to db** count
- * connections
-
-6. **Tuples returned from db** tuples/s
- * sequential
- * bitmap
-
-7. **Tuple reads from db** reads/s
- * disk
- * cache
-
-8. **Transactions on db** transactions/s
- * committed
- * rolled back
-
-9. **Tuples written to db** writes/s
- * inserted
- * updated
- * deleted
- * conflicts
-
-10. **Locks on db** count per type
- * locks
-
-### configuration
-
-```yaml
-socket:
- name : 'socket'
- user : 'postgres'
- database : 'postgres'
-
-tcp:
- name : 'tcp'
- user : 'postgres'
- database : 'postgres'
- host : 'localhost'
- port : 5432
-```
-
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
-
----
-
-# powerdns
-
-Module monitors powerdns performance and health metrics.
-
-Following charts are drawn:
-
-1. **Queries and Answers**
- * udp-queries
- * udp-answers
- * tcp-queries
- * tcp-answers
-
-2. **Cache Usage**
- * query-cache-hit
- * query-cache-miss
- * packetcache-hit
- * packetcache-miss
-
-3. **Cache Size**
- * query-cache-size
- * packetcache-size
- * key-cache-size
- * meta-cache-size
-
-4. **Latency**
- * latency
-
-### configuration
-
-```yaml
-local:
- name : 'local'
- url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
- header :
- X-API-Key: 'change_me'
-```
-
----
-
-# rabbitmq
-
-Module monitors rabbitmq performance and health metrics.
-
-Following charts are drawn:
-
-1. **Queued Messages**
- * ready
- * unacknowledged
-
-2. **Message Rates**
- * ack
- * redelivered
- * deliver
- * publish
-
-3. **Global Counts**
- * channels
- * consumers
- * connections
- * queues
- * exchanges
-
-4. **File Descriptors**
- * used descriptors
-
-5. **Socket Descriptors**
- * used descriptors
-
-6. **Erlang processes**
- * used processes
-
-7. **Erlang run queue**
- * Erlang run queue
-
-8. **Memory**
- * free memory in megabytes
-
-9. **Disk Space**
- * free disk space in gigabytes
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- host : '127.0.0.1'
- port : 15672
- user : 'guest'
- pass : 'guest'
-
-```
-
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
----
-
-# redis
-
-Get INFO data from redis instance.
-
-Following charts are drawn:
-
-1. **Operations** per second
- * operations
-
-2. **Hit rate** in percent
- * rate
-
-3. **Memory utilization** in kilobytes
- * total
- * lua
-
-4. **Database keys**
- * lines are created dynamically based on the number of databases
-
-5. **Clients**
- * connected
- * blocked
-
-6. **Slaves**
- * connected
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- socket : '/var/lib/redis/redis.sock'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 6379
-```
-
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`.
-
----
-
-# samba
-
-Performance metrics of Samba file sharing.
-
-It produces the following charts:
-
-1. **Syscall R/Ws** in kilobytes/s
- * sendfile
- * recvfile
-
-2. **Smb2 R/Ws** in kilobytes/s
- * readout
- * writein
- * readin
- * writeout
-
-3. **Smb2 Create/Close** in operations/s
- * create
- * close
-
-4. **Smb2 Info** in operations/s
- * getinfo
- * setinfo
-
-5. **Smb2 Find** in operations/s
- * find
-
-6. **Smb2 Notify** in operations/s
- * notify
-
-7. **Smb2 Lesser Ops** as counters
- * tcon
- * negprot
- * tdis
- * cancel
- * logoff
- * flush
- * lock
- * keepalive
- * break
- * sessetup
-
-### configuration
-
-Requires that `smbd` has been compiled with profiling enabled. It is also required
-that `smbd` was started either with the `-P 1` option or with the `smbd profiling level`
-option set in `smb.conf`.
-
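-The `smb.conf` route might look like this (illustrative snippet):
-
-```
-[global]
-    smbd profiling level = on
-```
-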
-This plugin uses `smbstatus -P`, which can only be executed by root. It uses
-sudo and assumes that sudo is configured such that the `netdata` user can
-execute `smbstatus` as root without a password.
-
-For example:
-
- netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-
-```yaml
-update_every : 5 # update frequency
-```
-
----
-
-# sensors
-
-System sensors information.
-
-Charts are created dynamically.
-
-### configuration
-
-For detailed configuration information please read [`sensors.conf`](https://github.com/firehol/netdata/blob/master/conf.d/python.d/sensors.conf) file.
-
-### possible issues
-
-There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
-We are tracking such cases in issue [#827](https://github.com/firehol/netdata/issues/827).
-Please join this discussion for help.
-
----
-
-# springboot
-
-This module will monitor one or more Java Spring-boot applications depending on configuration.
-
-It produces following charts:
-
-1. **Response Codes** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
- * others
-
-2. **Threads**
- * daemon
- * total
-
-3. **GC Time** in milliseconds and **GC Operations** in operations/s
- * Copy
- * MarkSweep
- * ...
-
-4. **Heap Memory Usage** in KB
- * used
- * committed
-
-### configuration
-
-Please see the [Monitoring Java Spring Boot Applications](https://github.com/firehol/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
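-
-As an illustrative minimal job (assuming the application exposes the Spring Boot actuator metrics endpoint at `/metrics` on port 8080):
-
-```yaml
-local:
- name : 'local'
- url : 'http://localhost:8080/metrics'
-```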
-
----
-
-# squid
-
-This module will monitor one or more squid instances depending on configuration.
-
-It produces following charts:
-
-1. **Client Bandwidth** in kilobits/s
- * in
- * out
- * hits
-
-2. **Client Requests** in requests/s
- * requests
- * hits
- * errors
-
-3. **Server Bandwidth** in kilobits/s
- * in
- * out
-
-4. **Server Requests** in requests/s
- * requests
- * errors
-
-### configuration
-
-```yaml
-priority : 50000
-
-local:
- request : 'cache_object://localhost:3128/counters'
- host : 'localhost'
- port : 3128
-```
-
-Without any configuration, the module will try to autodetect where squid presents its `counters` data.
-
----
-
-# smartd_log
-
-Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T attributes.
-
-It produces following charts (you can add additional attributes in the module configuration file):
-
-1. **Read Error Rate** attribute 1
-
-2. **Start/Stop Count** attribute 4
-
-3. **Reallocated Sectors Count** attribute 5
-
-4. **Seek Error Rate** attribute 7
-
-5. **Power-On Hours Count** attribute 9
-
-6. **Power Cycle Count** attribute 12
-
-7. **Load/Unload Cycles** attribute 193
-
-8. **Temperature** attribute 194
-
-9. **Current Pending Sectors** attribute 197
-
-10. **Off-Line Uncorrectable** attribute 198
-
-11. **Write Error Rate** attribute 200
-
-### configuration
-
-```yaml
-local:
- log_path : '/var/log/smartd/'
-```
-
-If no configuration is given, module will attempt to read log files in /var/log/smartd/ directory.
-
----
-
-# tomcat
-
-Presents tomcat container memory utilization.
-
-Charts:
-
-1. **Requests** per second
- * accesses
-
-2. **Volume** in KB/s
- * volume
-
-3. **Threads**
- * current
- * busy
-
-4. **JVM Free Memory** in MB
- * jvm
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
- user : 'tomcat_username'
- pass : 'secret_tomcat_password'
-```
-
-Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials.
-So it will probably fail.
-
----
-
-# Traefik
-
-Module uses the `health` API to provide statistics.
-
-It produces:
-
-1. **Responses** by statuses
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Responses** by codes
- * 2xx (successful)
- * 5xx (internal server errors)
- * 3xx (redirect)
- * 4xx (bad)
- * 1xx (informational)
- * other (non-standard responses)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Requests**/s
- * request statistics
-
-5. **Total response time**
- * sum of all response times
-
-6. **Average response time**
-
-7. **Average response time per iteration**
-
-8. **Uptime**
- * Traefik server uptime
-
-### configuration
-
-Needs only `url` to server's `health`
-
-Here is an example for local server:
-
-```yaml
-update_every : 1
-priority : 60000
-
-local:
- url : 'http://localhost:8080/health'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost:8080/health`.
-
----
-
-# varnish cache
-
-Module uses the `varnishstat` command to provide varnish cache statistics.
-
-It produces:
-
-1. **Connections Statistics** in connections/s
- * accepted
- * dropped
-
-2. **Client Requests** in requests/s
- * received
-
-3. **All History Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-4. **Current Poll Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-5. **Expired Objects** in expired/s
- * objects
-
-6. **Least Recently Used Nuked Objects** in nuked/s
- * objects
-
-
-7. **Number Of Threads In All Pools** in threads
- * threads
-
-8. **Threads Statistics** in threads/s
- * created
- * failed
- * limited
-
-9. **Current Queue Length** in requests
- * in queue
-
-10. **Backend Connections Statistics** in connections/s
- * successful
- * unhealthy
- * reused
- * closed
- * recycled
- * failed
-
-11. **Requests To The Backend** in requests/s
- * received
-
-12. **ESI Statistics** in problems/s
- * errors
- * warnings
-
-13. **Memory Usage** in MB
- * free
- * allocated
-
-14. **Uptime** in seconds
- * uptime
-
-
-### configuration
-
-No configuration is needed.
-
----
-
-# web_log
-
-Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
-
-It produces following charts:
-
-1. **Response by type** requests/s
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Response by code family** requests/s
- * 1xx (informational)
- * 2xx (successful)
- * 3xx (redirect)
- * 4xx (bad)
- * 5xx (internal server errors)
- * other (non-standard responses)
- * unmatched (the lines in the log file that are not matched)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Bandwidth** KB/s
- * received (bandwidth of requests)
- * sent (bandwidth of responses)
-
-5. **Timings** ms (request processing time)
- * min (fastest request)
- * max (slowest request)
- * average (mean request processing time)
-
-6. **Request per url** requests/s (configured by user)
-
-7. **Http Methods** requests/s (requests per http method)
-
-8. **Http Versions** requests/s (requests per http version)
-
-9. **IP protocols** requests/s (requests per ip protocol version)
-
-10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
-
-11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
-
-
-### configuration
-
-```yaml
-nginx_log:
- name : 'nginx_log'
- path : '/var/log/nginx/access.log'
-
-apache_log:
- name : 'apache_log'
- path : '/var/log/apache/other_vhosts_access.log'
- categories:
- cacti : 'cacti.*'
- observium : 'observium'
-```
-
-Module has preconfigured jobs for nginx, apache and gunicorn on various distros.
-
----
diff --git a/python.d/apache.chart.py b/python.d/apache.chart.py
deleted file mode 100644
index 789b3c099..000000000
--- a/python.d/apache.chart.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: apache netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://www.apache.org/server-status?auto'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
-
-CHARTS = {
- 'bytesperreq': {
- 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
- 'statistics', 'apache.bytesperreq', 'area'],
- 'lines': [
- ["size_req"]
- ]},
- 'workers': {
- 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
- 'lines': [
- ["idle"],
- ["busy"],
- ]},
- 'reqpersec': {
- 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
- 'apache.reqpersec', 'area'],
- 'lines': [
- ["requests_sec"]
- ]},
- 'bytespersec': {
- 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
- 'apache.bytespersec', 'area'],
- 'lines': [
- ["size_sec", None, 'absolute', 8, 1000]
- ]},
- 'requests': {
- 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
- 'lines': [
- ["requests", None, 'incremental']
- ]},
- 'net': {
- 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
- 'lines': [
- ["sent", None, 'incremental', 8, 1]
- ]},
- 'connections': {
- 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
- 'lines': [
- ["connections"]
- ]},
- 'conns_async': {
- 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
- 'lines': [
- ["keepalive"],
- ["closing"],
- ["writing"]
- ]}
-}
-
-ASSIGNMENT = {"BytesPerReq": 'size_req',
- "IdleWorkers": 'idle',
- "IdleServers": 'idle_servers',
- "BusyWorkers": 'busy',
- "BusyServers": 'busy_servers',
- "ReqPerSec": 'requests_sec',
- "BytesPerSec": 'size_sec',
- "Total Accesses": 'requests',
- "Total kBytes": 'sent',
- "ConnsTotal": 'connections',
- "ConnsAsyncKeepAlive": 'keepalive',
- "ConnsAsyncClosing": 'closing',
- "ConnsAsyncWriting": 'writing'}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
-
- def check(self):
- self._manager = self._build_manager()
- data = self._get_data()
- if not data:
- return None
-
- if 'idle_servers' in data:
- self.module_name = 'lighttpd'
- for chart in self.definitions:
- if chart == 'workers':
- lines = self.definitions[chart]['lines']
- lines[0] = ["idle_servers", 'idle']
- lines[1] = ["busy_servers", 'busy']
- opts = self.definitions[chart]['options']
- opts[1] = opts[1].replace('apache', 'lighttpd')
- opts[4] = opts[4].replace('apache', 'lighttpd')
- return True
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
- data = dict()
-
- for row in raw_data.split('\n'):
- tmp = row.split(":")
- if tmp[0] in ASSIGNMENT:
- try:
- data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
- except (IndexError, ValueError):
- continue
- return data or None
diff --git a/python.d/beanstalk.chart.py b/python.d/beanstalk.chart.py
deleted file mode 100644
index 8880afdd9..000000000
--- a/python.d/beanstalk.chart.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: beanstalk netdata python.d module
-# Author: l2isbad
-
-try:
- import beanstalkc
- BEANSTALKC = True
-except ImportError:
- BEANSTALKC = False
-
-try:
- import yaml
- YAML = True
-except ImportError:
- YAML = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
- 'current_connections', 'binlog', 'uptime']
-
-CHARTS = {
- 'cpu_usage': {
- 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
- 'lines': [
- ['rusage-utime', 'user', 'incremental'],
- ['rusage-stime', 'system', 'incremental']
- ]
- },
- 'jobs_rate': {
- 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
- 'lines': [
- ['total-jobs', 'total', 'incremental'],
- ['job-timeouts', 'timeouts', 'incremental']
- ]
- },
- 'connections_rate': {
- 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
- 'area'],
- 'lines': [
- ['total-connections', 'connections', 'incremental']
- ]
- },
- 'commands_rate': {
- 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
- 'lines': [
- ['cmd-put', 'put', 'incremental'],
- ['cmd-peek', 'peek', 'incremental'],
- ['cmd-peek-ready', 'peek-ready', 'incremental'],
- ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
- ['cmd-peek-buried', 'peek-buried', 'incremental'],
- ['cmd-reserve', 'reserve', 'incremental'],
- ['cmd-use', 'use', 'incremental'],
- ['cmd-watch', 'watch', 'incremental'],
- ['cmd-ignore', 'ignore', 'incremental'],
- ['cmd-delete', 'delete', 'incremental'],
- ['cmd-release', 'release', 'incremental'],
- ['cmd-bury', 'bury', 'incremental'],
- ['cmd-kick', 'kick', 'incremental'],
- ['cmd-stats', 'stats', 'incremental'],
- ['cmd-stats-job', 'stats-job', 'incremental'],
- ['cmd-stats-tube', 'stats-tube', 'incremental'],
- ['cmd-list-tubes', 'list-tubes', 'incremental'],
- ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
- ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
- ['cmd-pause-tube', 'pause-tube', 'incremental']
- ]
- },
- 'current_tubes': {
- 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
- 'lines': [
- ['current-tubes', 'tubes']
- ]
- },
- 'current_jobs': {
- 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
- 'lines': [
- ['current-jobs-urgent', 'urgent'],
- ['current-jobs-ready', 'ready'],
- ['current-jobs-reserved', 'reserved'],
- ['current-jobs-delayed', 'delayed'],
- ['current-jobs-buried', 'buried']
- ]
- },
- 'current_connections': {
- 'options': [None, 'Current Connections', 'connections', 'server statistics',
- 'beanstalk.current_connections', 'line'],
- 'lines': [
- ['current-connections', 'written'],
- ['current-producers', 'producers'],
- ['current-workers', 'workers'],
- ['current-waiting', 'waiting']
- ]
- },
- 'binlog': {
- 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
- 'lines': [
- ['binlog-records-written', 'written', 'incremental'],
- ['binlog-records-migrated', 'migrated', 'incremental']
- ]
- },
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
- 'lines': [
- ['uptime'],
- ]
- }
-}
-
-
-def tube_chart_template(name):
- order = ['{0}_jobs_rate'.format(name),
- '{0}_jobs'.format(name),
- '{0}_connections'.format(name),
- '{0}_commands'.format(name),
- '{0}_pause'.format(name)
- ]
- family = 'tube {0}'.format(name)
-
- charts = {
- order[0]: {
- 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
- 'lines': [
- ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
- ]},
- order[1]: {
- 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
- ['_'.join([name, 'current-jobs-ready']), 'ready'],
- ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
- ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
- ['_'.join([name, 'current-jobs-buried']), 'buried']
- ]},
- order[2]: {
- 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-using']), 'using'],
- ['_'.join([name, 'current-waiting']), 'waiting'],
- ['_'.join([name, 'current-watching']), 'watching']
- ]},
- order[3]: {
- 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
- 'lines': [
- ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
- ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
- ]},
- order[4]: {
- 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
- 'lines': [
- ['_'.join([name, 'pause']), 'since'],
- ['_'.join([name, 'pause-time-left']), 'left']
- ]}
-
- }
-
- return order, charts
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.configuration = configuration
- self.order = list(ORDER)
- self.definitions = dict(CHARTS)
- self.conn = None
- self.alive = True
-
- def check(self):
- if not BEANSTALKC:
- self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
- return False
-
- if not YAML:
- self.error("'yaml' module is needed to use beanstalk.chart.py")
- return False
-
- self.conn = self.connect()
-
- return True if self.conn else False
-
- def get_data(self):
- """
- :return: dict
- """
- if not self.is_alive():
- return None
-
- active_charts = self.charts.active_charts()
- data = dict()
-
- try:
- data.update(self.conn.stats())
-
- for tube in self.conn.tubes():
- stats = self.conn.stats_tube(tube)
-
- if tube + '_jobs_rate' not in active_charts:
- self.create_new_tube_charts(tube)
-
- for stat in stats:
- data['_'.join([tube, stat])] = stats[stat]
-
- except beanstalkc.SocketError:
- self.alive = False
- return None
-
- return data or None
-
- def create_new_tube_charts(self, tube):
- order, charts = tube_chart_template(tube)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
- def connect(self):
- host = self.configuration.get('host', '127.0.0.1')
- port = self.configuration.get('port', 11300)
- timeout = self.configuration.get('timeout', 1)
- try:
- return beanstalkc.Connection(host=host,
- port=port,
- connect_timeout=timeout,
- parse_yaml=yaml.load)
- except beanstalkc.SocketError as error:
- self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
- return None
-
- def reconnect(self):
- try:
- self.conn.reconnect()
- self.alive = True
- return True
- except beanstalkc.SocketError:
- return False
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
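
For context on the per-tube naming used by get_data() and tube_chart_template() above, here is a minimal sketch of the flattening step; the tube name and stat values are invented sample data, not real beanstalkd output.

    def flatten_tube_stats(tube, stats):
        """Prefix every stat with its tube name, as get_data() does before charting."""
        return {'_'.join([tube, stat]): value for stat, value in stats.items()}

    sample = {'current-jobs-ready': 3, 'total-jobs': 42, 'pause': 0}
    print(flatten_tube_stats('default', sample))
    # {'default_current-jobs-ready': 3, 'default_total-jobs': 42, 'default_pause': 0}
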
diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py
deleted file mode 100644
index cc96659b2..000000000
--- a/python.d/bind_rndc.chart.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: bind rndc netdata python.d module
-# Author: l2isbad
-
-import os
-
-from collections import defaultdict
-from subprocess import Popen
-
-from bases.collection import find_binary
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-update_every = 30
-
-ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
-
-CHARTS = {
- 'name_server_statistics': {
- 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
- 'bind_rndc.name_server_statistics', 'line'],
- 'lines': [
- ['nms_requests', 'requests', 'incremental'],
- ['nms_rejected_queries', 'rejected_queries', 'incremental'],
- ['nms_success', 'success', 'incremental'],
- ['nms_failure', 'failure', 'incremental'],
- ['nms_responses', 'responses', 'incremental'],
- ['nms_duplicate', 'duplicate', 'incremental'],
- ['nms_recursion', 'recursion', 'incremental'],
- ['nms_nxrrset', 'nxrrset', 'incremental'],
- ['nms_nxdomain', 'nxdomain', 'incremental'],
- ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
- ['nms_auth_answer', 'auth_answer', 'incremental'],
- ['nms_dropped_queries', 'dropped_queries', 'incremental'],
- ]},
- 'incoming_queries': {
- 'options': [None, 'Incoming Queries', 'queries', 'incoming queries',
- 'bind_rndc.incoming_queries', 'line'],
- 'lines': [
- ]},
- 'outgoing_queries': {
- 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries',
- 'bind_rndc.outgoing_queries', 'line'],
- 'lines': [
- ]},
- 'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MB', 'file size',
- 'bind_rndc.stats_size', 'line'],
- 'lines': [
- ['stats_size', None, 'absolute', 1, 1 << 20]
- ]}
-}
-
-NMS = {
- 'nms_requests':
- ['IPv4 requests received',
- 'IPv6 requests received',
- 'TCP requests received',
- 'requests with EDNS(0) receive'],
- 'nms_responses':
- ['responses sent',
- 'truncated responses sent',
- 'responses with EDNS(0) sent',
- 'requests with unsupported EDNS version received'],
- 'nms_failure':
- ['other query failures',
- 'queries resulted in SERVFAIL'],
- 'nms_auth_answer':
- ['queries resulted in authoritative answer'],
- 'nms_non_auth_answer':
- ['queries resulted in non authoritative answer'],
- 'nms_nxrrset':
- ['queries resulted in nxrrset'],
- 'nms_success':
- ['queries resulted in successful answer'],
- 'nms_nxdomain':
- ['queries resulted in NXDOMAIN'],
- 'nms_recursion':
- ['queries caused recursion'],
- 'nms_duplicate':
- ['duplicate queries received'],
- 'nms_rejected_queries':
- ['auth queries rejected',
- 'recursive queries rejected'],
- 'nms_dropped_queries':
- ['queries dropped']
-}
-
-STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
- self.rndc = find_binary('rndc')
-        self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth_answer=0,
-                         nms_non_auth_answer=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
-                         nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
-                         nms_dropped_queries=0)
-
- def check(self):
- if not self.rndc:
- self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
- return False
-
- if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
- self.error('Cannot access file %s' % self.named_stats_path)
- return False
-
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if not run_rndc.returncode:
- return True
- self.error('Not enough permissions to run "%s stats"' % self.rndc)
- return False
-
- def _get_raw_data(self):
- """
- Run 'rndc stats' and read last dump from named.stats
- :return: dict
- """
- result = dict()
- try:
- current_size = os.path.getsize(self.named_stats_path)
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if run_rndc.returncode:
- return None
- with open(self.named_stats_path) as named_stats:
- named_stats.seek(current_size)
- result['stats'] = named_stats.readlines()
- result['size'] = current_size
- return result
- except (OSError, IOError):
- return None
-
- def _get_data(self):
- """
- Parse data from _get_raw_data()
- :return: dict
- """
-
- raw_data = self._get_raw_data()
-
- if raw_data is None:
- return None
- parsed = dict()
- for stat in STATS:
- parsed[stat] = parse_stats(field=stat,
- named_stats=raw_data['stats'])
-
- self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
-
-        for parsed_key, chart_name in zip(['Incoming Queries', 'Outgoing Queries'],
-                                          ['incoming_queries', 'outgoing_queries']):
- for dimension_id, value in queries_mapper(data=parsed[parsed_key],
- add=chart_name[:9]).items():
-
- if dimension_id not in self.data:
- dimension = dimension_id.replace(chart_name[:9], '')
- if dimension_id not in self.charts[chart_name]:
- self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
-
- self.data[dimension_id] = value
-
- self.data['stats_size'] = raw_data['size']
- return self.data
-
-
-def parse_stats(field, named_stats):
- """
- :param field: str:
- :param named_stats: list:
- :return: dict
-
- Example:
-    field: 'Incoming Queries'
-    named_stats (list of lines):
- ++ Incoming Requests ++
- 1405660 QUERY
- 3 NOTIFY
- ++ Incoming Queries ++
- 1214961 A
- 75 NS
- 2 CNAME
- 2897 SOA
- 35544 PTR
- 14 MX
- 5822 TXT
- 145974 AAAA
- 371 SRV
- ++ Outgoing Queries ++
- ...
-
- result:
-    {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
- """
- data = dict()
- ns = iter(named_stats)
- for line in ns:
- if field not in line:
- continue
- while True:
- try:
- line = next(ns)
- except StopIteration:
- break
- if '++' not in line:
- if '[' in line:
- continue
- v, k = line.strip().split(' ', 1)
- data[k] = int(v)
- continue
- break
- break
- return data
-
-
-def nms_mapper(data):
- """
- :param data: dict
- :return: dict(defaultdict)
- """
- result = defaultdict(int)
- for k, v in NMS.items():
- for elem in v:
- result[k] += data.get(elem, 0)
- return result
-
-
-def queries_mapper(data, add):
- """
- :param data: dict
- :param add: str
- :return: dict
- """
- return dict([(add + k, v) for k, v in data.items()])
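
As a quick illustration of parse_stats() above, this is how it behaves on the excerpt from its docstring; the import path is hypothetical and shown only for intent, since python.d modules are normally loaded by the plugin rather than imported directly.

    # Hypothetical import path, for illustration only.
    from bind_rndc import parse_stats

    sample_lines = [
        '++ Incoming Requests ++\n',
        '     1405660 QUERY\n',
        '           3 NOTIFY\n',
        '++ Incoming Queries ++\n',
        '     1214961 A\n',
        '          75 NS\n',
        '++ Outgoing Queries ++\n',
    ]
    print(parse_stats(field='Incoming Queries', named_stats=sample_lines))
    # expected: {'A': 1214961, 'NS': 75}
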
diff --git a/python.d/ceph.chart.py b/python.d/ceph.chart.py
deleted file mode 100644
index fb78397d0..000000000
--- a/python.d/ceph.chart.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: ceph netdata python.d module
-# Author: Luis Eduardo (lets00)
-
-try:
- import rados
- CEPH = True
-except ImportError:
- CEPH = False
-
-import json
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-update_every = 10
-priority = 60000
-retries = 60
-
-ORDER = ['general_usage', 'general_objects', 'general_bytes', 'general_operations',
- 'general_latency', 'pool_usage', 'pool_objects', 'pool_read_bytes',
- 'pool_write_bytes', 'pool_read_operations', 'pool_write_operations', 'osd_usage',
- 'osd_apply_latency', 'osd_commit_latency']
-
-CHARTS = {
- 'general_usage': {
- 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
- 'lines': [
- ['general_available', 'avail', 'absolute', 1, 1024],
- ['general_usage', 'used', 'absolute', 1, 1024]
- ]
- },
- 'general_objects': {
- 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
- 'lines': [
- ['general_objects', 'cluster', 'absolute']
- ]
- },
- 'general_bytes': {
- 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
- 'area'],
- 'lines': [
- ['general_read_bytes', 'read', 'absolute', 1, 1024],
- ['general_write_bytes', 'write', 'absolute', -1, 1024]
- ]
- },
- 'general_operations': {
- 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
- 'area'],
- 'lines': [
- ['general_read_operations', 'read', 'absolute', 1],
- ['general_write_operations', 'write', 'absolute', -1]
- ]
- },
- 'general_latency': {
- 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
- 'area'],
- 'lines': [
- ['general_apply_latency', 'apply', 'absolute'],
- ['general_commit_latency', 'commit', 'absolute']
- ]
- },
- 'pool_usage': {
- 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
- 'lines': []
- },
- 'pool_objects': {
- 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
- 'lines': []
- },
- 'pool_read_bytes': {
- 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
- 'lines': []
- },
- 'pool_write_bytes': {
- 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
- 'lines': []
- },
- 'pool_read_operations': {
- 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
- 'lines': []
- },
- 'pool_write_operations': {
- 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
- 'lines': []
- },
- 'osd_usage': {
- 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
- 'lines': []
- },
- 'osd_apply_latency': {
- 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
- 'lines': []
- },
- 'osd_commit_latency': {
- 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
- 'lines': []
- }
-
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.config_file = self.configuration.get('config_file')
- self.keyring_file = self.configuration.get('keyring_file')
-
- def check(self):
- """
- Checks module
- :return:
- """
- if not CEPH:
- self.error('rados module is needed to use ceph.chart.py')
- return False
- if not (self.config_file and self.keyring_file):
- self.error('config_file and/or keyring_file is not defined')
- return False
- try:
- self.cluster = rados.Rados(conffile=self.config_file,
- conf=dict(keyring=self.keyring_file))
- self.cluster.connect()
- except rados.Error as error:
- self.error(error)
- return False
- self.create_definitions()
- return True
-
- def create_definitions(self):
- """
- Create dynamically charts options
- :return: None
- """
- # Pool lines
- for pool in sorted(self._get_df()['pools']):
- self.definitions['pool_usage']['lines'].append([pool['name'],
- pool['name'],
- 'absolute'])
- self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
-
- # OSD lines
- for osd in sorted(self._get_osd_df()['nodes']):
- self.definitions['osd_usage']['lines'].append([osd['name'],
- osd['name'],
- 'absolute'])
- self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
- self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
-
- def get_data(self):
- """
- Catch all ceph data
- :return: dict
- """
- try:
- data = {}
- df = self._get_df()
- osd_df = self._get_osd_df()
- osd_perf = self._get_osd_perf()
- pool_stats = self._get_osd_pool_stats()
- data.update(self._get_general(osd_perf, pool_stats))
- for pool in df['pools']:
- data.update(self._get_pool_usage(pool))
- data.update(self._get_pool_objects(pool))
- for pool_io in pool_stats:
- data.update(self._get_pool_rw(pool_io))
- for osd in osd_df['nodes']:
- data.update(self._get_osd_usage(osd))
- for osd_apply_commit in osd_perf['osd_perf_infos']:
- data.update(self._get_osd_latency(osd_apply_commit))
- return data
- except (ValueError, AttributeError) as error:
- self.error(error)
- return None
-
- def _get_general(self, osd_perf, pool_stats):
- """
- Get ceph's general usage
- :return: dict
- """
- status = self.cluster.get_cluster_stats()
- read_bytes_sec = 0
- write_bytes_sec = 0
- read_op_per_sec = 0
- write_op_per_sec = 0
- apply_latency = 0
- commit_latency = 0
-
- for pool_rw_io_b in pool_stats:
- read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
- write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
- read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
- write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
- for perf in osd_perf['osd_perf_infos']:
- apply_latency += perf['perf_stats']['apply_latency_ms']
- commit_latency += perf['perf_stats']['commit_latency_ms']
-
- return {'general_usage': int(status['kb_used']),
- 'general_available': int(status['kb_avail']),
- 'general_objects': int(status['num_objects']),
- 'general_read_bytes': read_bytes_sec,
- 'general_write_bytes': write_bytes_sec,
- 'general_read_operations': read_op_per_sec,
- 'general_write_operations': write_op_per_sec,
- 'general_apply_latency': apply_latency,
- 'general_commit_latency': commit_latency
- }
-
- @staticmethod
- def _get_pool_usage(pool):
- """
- Process raw data into pool usage dict information
-        :return: A dict keyed by pool name with used space in KB as value
- """
- return {pool['name']: pool['stats']['kb_used']}
-
- @staticmethod
- def _get_pool_objects(pool):
- """
- Process raw data into pool usage dict information
- :return: A pool dict with pool name's key and object numbers
- """
- return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
-
- @staticmethod
- def _get_pool_rw(pool):
- """
- Get read/write kb and operations in a pool
- :return: A pool dict with both read/write bytes and operations.
- """
- return {'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
- 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
- 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
- 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
- }
-
- @staticmethod
- def _get_osd_usage(osd):
- """
- Process raw data into osd dict information to get osd usage
-        :return: A dict keyed by osd name with used space in KB as value
- """
- return {osd['name']: float(osd['kb_used'])}
-
- @staticmethod
- def _get_osd_latency(osd):
- """
- Get ceph osd apply and commit latency
-        :return: A dict keyed per osd id with both apply and commit latency values in ms
- """
- return {'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
- 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']}
-
- def _get_df(self):
- """
- Get ceph df output
- :return: ceph df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'df',
- 'format': 'json'
- }), '')[1])
-
- def _get_osd_df(self):
- """
- Get ceph osd df output
- :return: ceph osd df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd df',
- 'format': 'json'
- }), '')[1])
-
- def _get_osd_perf(self):
- """
- Get ceph osd performance
- :return: ceph osd perf --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd perf',
- 'format': 'json'
- }), '')[1])
-
- def _get_osd_pool_stats(self):
- """
- Get ceph osd pool status.
- This command is used to get information about both
- read/write operation and bytes per second on each pool
- :return: ceph osd pool stats --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd pool stats',
- 'format': 'json'
- }), '')[1])
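
For reference, a minimal standalone sketch of the mon_command pattern the helpers above rely on; the conffile and keyring paths are placeholders, and a reachable Ceph cluster with the rados bindings installed is assumed.

    import json
    import rados

    # Placeholder paths; adjust to your cluster.
    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
                          conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'))
    cluster.connect()
    # mon_command() returns (return_code, output_buffer, error_string); the module keeps [1].
    ret, outbuf, outs = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
    df = json.loads(outbuf)
    print(sorted(pool['name'] for pool in df['pools']))
    cluster.shutdown()
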
diff --git a/python.d/chrony.chart.py b/python.d/chrony.chart.py
deleted file mode 100644
index 8f331fa50..000000000
--- a/python.d/chrony.chart.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: chrony netdata python.d module
-# Author: Dominik Schloesser (domschl)
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 10
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
-
-CHARTS = {
- # id: {
- # 'options': [name, title, units, family, context, charttype],
- # 'lines': [
- # [unique_dimension_name, name, algorithm, multiplier, divisor]
- # ]}
- 'system': {
- 'options': [None, "Chrony System Time Deltas", "microseconds", 'system', 'chrony.system', 'area'],
- 'lines': [
- ['timediff', 'system time', 'absolute', 1, 1000]
- ]},
- 'offsets': {
- 'options': [None, "Chrony System Time Offsets", "microseconds", 'system', 'chrony.offsets', 'area'],
- 'lines': [
- ['lastoffset', 'last offset', 'absolute', 1, 1000],
- ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
- ]},
- 'stratum': {
- 'options': [None, "Chrony Stratum", "stratum", 'root', 'chrony.stratum', 'line'],
- 'lines': [
- ['stratum', None, 'absolute', 1, 1]
- ]},
- 'root': {
- 'options': [None, "Chrony Root Delays", "milliseconds", 'root', 'chrony.root', 'line'],
- 'lines': [
- ['rootdelay', 'delay', 'absolute', 1, 1000000],
- ['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
- ]},
- 'frequency': {
- 'options': [None, "Chrony Frequency", "ppm", 'frequencies', 'chrony.frequency', 'area'],
- 'lines': [
- ['frequency', None, 'absolute', 1, 1000]
- ]},
- 'residualfreq': {
- 'options': [None, "Chrony Residual frequency", "ppm", 'frequencies', 'chrony.residualfreq', 'area'],
- 'lines': [
- ['residualfreq', 'residual frequency', 'absolute', 1, 1000]
- ]},
- 'skew': {
- 'options': [None, "Chrony Skew, error bound on frequency", "ppm", 'frequencies', 'chrony.skew', 'area'],
- 'lines': [
- ['skew', None, 'absolute', 1, 1000]
- ]}
-}
-
-CHRONY = [('Frequency', 'frequency', 1e3),
- ('Last offset', 'lastoffset', 1e9),
- ('RMS offset', 'rmsoffset', 1e9),
- ('Residual freq', 'residualfreq', 1e3),
- ('Root delay', 'rootdelay', 1e9),
- ('Root dispersion', 'rootdispersion', 1e9),
- ('Skew', 'skew', 1e3),
- ('Stratum', 'stratum', 1),
- ('System time', 'timediff', 1e9)]
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(
- self, configuration=configuration, name=name)
- self.command = "chronyc -n tracking"
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- raw_data = (line.split(':', 1) for line in raw_data)
- parsed, data = dict(), dict()
-
- for line in raw_data:
- try:
- key, value = (l.strip() for l in line)
- except ValueError:
- continue
- if value:
- parsed[key] = value.split()[0]
-
- for key, dim_id, multiplier in CHRONY:
- try:
- data[dim_id] = int(float(parsed[key]) * multiplier)
- except (KeyError, ValueError):
- continue
-
- return data or None
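
To make the scaling in _get_data() above concrete: each `chronyc -n tracking` field is parsed as a float and stored as an integer after multiplying by its factor from the CHRONY table. The sample line below is invented.

    line = 'RMS offset      : 0.000254654 seconds'
    key, value = (part.strip() for part in line.split(':', 1))
    # 'RMS offset' carries a factor of 1e9 in CHRONY, i.e. seconds to nanoseconds.
    print(key, int(float(value.split()[0]) * 1e9))  # RMS offset 254654
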
diff --git a/python.d/couchdb.chart.py b/python.d/couchdb.chart.py
deleted file mode 100644
index 558bac587..000000000
--- a/python.d/couchdb.chart.py
+++ /dev/null
@@ -1,410 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: couchdb netdata python.d module
-# Author: wohali <wohali@apache.org>
-# Thanks to l2isbad for good examples :)
-
-from collections import namedtuple, defaultdict
-from json import loads
-from threading import Thread
-from socket import gethostbyname, gaierror
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-
-OVERVIEW_STATS = [
- 'couchdb.database_reads.value',
- 'couchdb.database_writes.value',
-    'couchdb.httpd.view_reads.value',
- 'couchdb.httpd_request_methods.COPY.value',
- 'couchdb.httpd_request_methods.DELETE.value',
- 'couchdb.httpd_request_methods.GET.value',
- 'couchdb.httpd_request_methods.HEAD.value',
- 'couchdb.httpd_request_methods.OPTIONS.value',
- 'couchdb.httpd_request_methods.POST.value',
- 'couchdb.httpd_request_methods.PUT.value',
- 'couchdb.httpd_status_codes.200.value',
- 'couchdb.httpd_status_codes.201.value',
- 'couchdb.httpd_status_codes.202.value',
- 'couchdb.httpd_status_codes.204.value',
- 'couchdb.httpd_status_codes.206.value',
- 'couchdb.httpd_status_codes.301.value',
- 'couchdb.httpd_status_codes.302.value',
- 'couchdb.httpd_status_codes.304.value',
- 'couchdb.httpd_status_codes.400.value',
- 'couchdb.httpd_status_codes.401.value',
- 'couchdb.httpd_status_codes.403.value',
- 'couchdb.httpd_status_codes.404.value',
- 'couchdb.httpd_status_codes.405.value',
- 'couchdb.httpd_status_codes.406.value',
- 'couchdb.httpd_status_codes.409.value',
- 'couchdb.httpd_status_codes.412.value',
- 'couchdb.httpd_status_codes.413.value',
- 'couchdb.httpd_status_codes.414.value',
- 'couchdb.httpd_status_codes.415.value',
- 'couchdb.httpd_status_codes.416.value',
- 'couchdb.httpd_status_codes.417.value',
- 'couchdb.httpd_status_codes.500.value',
- 'couchdb.httpd_status_codes.501.value',
- 'couchdb.open_os_files.value',
- 'couch_replicator.jobs.running.value',
- 'couch_replicator.jobs.pending.value',
- 'couch_replicator.jobs.crashed.value',
-]
-
-SYSTEM_STATS = [
- 'context_switches',
- 'run_queue',
- 'ets_table_count',
- 'reductions',
- 'memory.atom',
- 'memory.atom_used',
- 'memory.binary',
- 'memory.code',
- 'memory.ets',
- 'memory.other',
- 'memory.processes',
- 'io_input',
- 'io_output',
- 'os_proc_count',
- 'process_count',
- 'internal_replication_jobs'
-]
-
-DB_STATS = [
- 'doc_count',
- 'doc_del_count',
- 'sizes.file',
- 'sizes.external',
- 'sizes.active'
-]
-
-ORDER = [
- 'activity',
- 'request_methods',
- 'response_codes',
- 'active_tasks',
- 'replicator_jobs',
- 'open_files',
- 'db_sizes_file',
- 'db_sizes_external',
- 'db_sizes_active',
- 'db_doc_counts',
- 'db_doc_del_counts',
- 'erlang_memory',
- 'erlang_proc_counts',
- 'erlang_peak_msg_queue',
- 'erlang_reductions'
-]
-
-CHARTS = {
- 'activity': {
- 'options': [None, 'Overall Activity', 'req/s',
- 'dbactivity', 'couchdb.activity', 'stacked'],
- 'lines': [
- ['couchdb_database_reads', 'DB reads', 'incremental'],
- ['couchdb_database_writes', 'DB writes', 'incremental'],
- ['couchdb_httpd_view_reads', 'View reads', 'incremental']
- ]
- },
- 'request_methods': {
- 'options': [None, 'HTTP request methods', 'req/s',
- 'httptraffic', 'couchdb.request_methods',
- 'stacked'],
- 'lines': [
- ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'],
- ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'],
- ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
- ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
- ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
- 'incremental'],
- ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
- ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
- ]
- },
- 'response_codes': {
- 'options': [None, 'HTTP response status codes', 'resp/s',
- 'httptraffic', 'couchdb.response_codes',
- 'stacked'],
- 'lines': [
- ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'],
- ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
- ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
- ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
- 'incremental'],
- ['couchdb_httpd_status_codes_3xx', '3xx Redirection',
- 'incremental'],
- ['couchdb_httpd_status_codes_4xx', '4xx Client error',
- 'incremental'],
- ['couchdb_httpd_status_codes_5xx', '5xx Server error',
- 'incremental']
- ]
- },
- 'open_files': {
- 'options': [None, 'Open files', 'files',
- 'ops', 'couchdb.open_files', 'line'],
- 'lines': [
- ['couchdb_open_os_files', '# files', 'absolute']
- ]
- },
- 'active_tasks': {
- 'options': [None, 'Active task breakdown', 'tasks',
- 'ops', 'couchdb.active_tasks', 'stacked'],
- 'lines': [
- ['activetasks_indexer', 'Indexer', 'absolute'],
- ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
- ['activetasks_replication', 'Replication', 'absolute'],
- ['activetasks_view_compaction', 'View Compaction', 'absolute']
- ]
- },
- 'replicator_jobs': {
- 'options': [None, 'Replicator job breakdown', 'jobs',
- 'ops', 'couchdb.replicator_jobs', 'stacked'],
- 'lines': [
- ['couch_replicator_jobs_running', 'Running', 'absolute'],
- ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
- ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'],
- ['internal_replication_jobs', 'Internal replication jobs',
- 'absolute']
- ]
- },
- 'erlang_memory': {
- 'options': [None, 'Erlang VM memory usage', 'bytes',
- 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
- 'lines': [
- ['memory_atom', 'atom', 'absolute'],
- ['memory_binary', 'binaries', 'absolute'],
- ['memory_code', 'code', 'absolute'],
- ['memory_ets', 'ets', 'absolute'],
- ['memory_processes', 'procs', 'absolute'],
- ['memory_other', 'other', 'absolute']
- ]
- },
- 'erlang_reductions': {
- 'options': [None, 'Erlang reductions', 'count',
- 'erlang', 'couchdb.reductions', 'line'],
- 'lines': [
- ['reductions', 'reductions', 'incremental']
- ]
- },
- 'erlang_proc_counts': {
- 'options': [None, 'Process counts', 'count',
- 'erlang', 'couchdb.proccounts', 'line'],
- 'lines': [
- ['os_proc_count', 'OS procs', 'absolute'],
- ['process_count', 'erl procs', 'absolute']
- ]
- },
- 'erlang_peak_msg_queue': {
- 'options': [None, 'Peak message queue size', 'count',
- 'erlang', 'couchdb.peakmsgqueue',
- 'line'],
- 'lines': [
- ['peak_msg_queue', 'peak size', 'absolute']
- ]
- },
- # Lines for the following are added as part of check()
- 'db_sizes_file': {
- 'options': [None, 'Database sizes (file)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_file', 'line'],
- 'lines': []
- },
- 'db_sizes_external': {
- 'options': [None, 'Database sizes (external)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_external', 'line'],
- 'lines': []
- },
- 'db_sizes_active': {
- 'options': [None, 'Database sizes (active)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_active', 'line'],
- 'lines': []
- },
- 'db_doc_counts': {
- 'options': [None, 'Database # of docs', 'docs',
-                    'perdbstats', 'couchdb.db_doc_count', 'line'],
- 'lines': []
- },
- 'db_doc_del_counts': {
- 'options': [None, 'Database # of deleted docs', 'docs',
-                    'perdbstats', 'couchdb.db_doc_del_count', 'line'],
- 'lines': []
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 5984)
- self.node = self.configuration.get('node', 'couchdb@127.0.0.1')
- self.scheme = self.configuration.get('scheme', 'http')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- try:
- self.dbs = self.configuration.get('databases').split(' ')
- except (KeyError, AttributeError):
- self.dbs = []
-
- def check(self):
- if not (self.host and self.port):
-            self.error('Host and/or port is not defined in the module configuration file')
- return False
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
- self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme,
- host=self.host,
- port=self.port)
- stats = self.url + '/_node/{node}/_stats'.format(node=self.node)
- active_tasks = self.url + '/_active_tasks'
- system = self.url + '/_node/{node}/_system'.format(node=self.node)
- self.methods = [METHODS(get_data=self._get_overview_stats,
- url=stats,
- stats=OVERVIEW_STATS),
- METHODS(get_data=self._get_active_tasks_stats,
- url=active_tasks,
- stats=None),
- METHODS(get_data=self._get_overview_stats,
- url=system,
- stats=SYSTEM_STATS),
- METHODS(get_data=self._get_dbs_stats,
- url=self.url,
- stats=DB_STATS)]
- # must initialise manager before using _get_raw_data
- self._manager = self._build_manager()
- self.dbs = [db for db in self.dbs
- if self._get_raw_data(self.url + '/' + db)]
- for db in self.dbs:
- self.definitions['db_sizes_file']['lines'].append(
- ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000]
- )
- self.definitions['db_sizes_external']['lines'].append(
- ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000]
- )
- self.definitions['db_sizes_active']['lines'].append(
- ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000]
- )
- self.definitions['db_doc_counts']['lines'].append(
- ['db_'+db+'_doc_count', db, 'absolute']
- )
- self.definitions['db_doc_del_counts']['lines'].append(
- ['db_'+db+'_doc_del_count', db, 'absolute']
- )
- return UrlService.check(self)
-
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
-
- for method in self.methods:
- th = Thread(target=method.get_data,
- args=(queue, method.url, method.stats))
- th.start()
- threads.append(th)
-
- for thread in threads:
- thread.join()
- result.update(queue.get())
-
- # self.info('couchdb result = ' + str(result))
- return result or None
-
- def _get_overview_stats(self, queue, url, stats):
- raw_data = self._get_raw_data(url)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- to_netdata = self._fetch_data(raw_data=data, metrics=stats)
- if 'message_queues' in data:
- to_netdata['peak_msg_queue'] = get_peak_msg_queue(data)
- return queue.put(to_netdata)
-
- def _get_active_tasks_stats(self, queue, url, _):
- taskdict = defaultdict(int)
- taskdict["activetasks_indexer"] = 0
- taskdict["activetasks_database_compaction"] = 0
- taskdict["activetasks_replication"] = 0
- taskdict["activetasks_view_compaction"] = 0
- raw_data = self._get_raw_data(url)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- for task in data:
- taskdict["activetasks_" + task["type"]] += 1
- return queue.put(dict(taskdict))
-
- def _get_dbs_stats(self, queue, url, stats):
- to_netdata = {}
- for db in self.dbs:
- raw_data = self._get_raw_data(url + '/' + db)
- if not raw_data:
- continue
- data = loads(raw_data)
- for metric in stats:
- value = data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError as e:
- self.debug('cannot process ' + metric + ' for ' + db
- + ": " + str(e))
- continue
- metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list))
- to_netdata[metric_name] = value
- return queue.put(to_netdata)
-
- def _fetch_data(self, raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError as e:
- self.debug('cannot process ' + metric + ': ' + str(e))
- continue
- # strip off .value from end of stat
- if metrics_list[-1] == 'value':
- metrics_list = metrics_list[:-1]
- # sum up 3xx/4xx/5xx
- if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \
- int(metrics_list[2]) > 202:
- metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100)
- if '_'.join(metrics_list) in data:
- data['_'.join(metrics_list)] += value
- else:
- data['_'.join(metrics_list)] = value
- else:
- data['_'.join(metrics_list)] = value
- return data
-
-
-def get_peak_msg_queue(data):
- maxsize = 0
- queues = data['message_queues']
- for queue in iter(queues.values()):
- if isinstance(queue, dict) and 'count' in queue:
- value = queue['count']
- elif isinstance(queue, int):
- value = queue
- else:
- continue
- maxsize = max(maxsize, value)
- return maxsize
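
A minimal sketch of the dotted-path lookup that _fetch_data() and _get_dbs_stats() above perform on the JSON they receive; the nested dict is invented sample data shaped loosely like a node _stats response.

    sample = {'couchdb': {'database_reads': {'value': 12},
                          'httpd_status_codes': {'404': {'value': 3}}}}

    def lookup(data, dotted):
        value = data
        for key in dotted.split('.'):
            value = value[key]
        return value

    print(lookup(sample, 'couchdb.database_reads.value'))          # 12
    print(lookup(sample, 'couchdb.httpd_status_codes.404.value'))  # 3
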
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
deleted file mode 100644
index 3abde736c..000000000
--- a/python.d/cpufreq.chart.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: cpufreq netdata python.d module
-# Author: Pawel Krupa (paulfantom) and Steven Noonan (tycho)
-
-import glob
-import os
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-
-ORDER = ['cpufreq']
-
-CHARTS = {
- 'cpufreq': {
- 'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]}
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- prefix = os.getenv('NETDATA_HOST_PREFIX', "")
- if prefix.endswith('/'):
- prefix = prefix[:-1]
- self.sys_dir = prefix + "/sys/devices"
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.fake_name = 'cpu'
- self.assignment = {}
- self.accurate_exists = True
- self.accurate_last = {}
-
- def _get_data(self):
- data = {}
-
- if self.accurate_exists:
- accurate_ok = True
-
- for name, paths in self.assignment.items():
- last = self.accurate_last[name]
-
- current = {}
- deltas = {}
- ticks_since_last = 0
-
- for line in open(paths['accurate'], 'r'):
- line = list(map(int, line.split()))
- current[line[0]] = line[1]
- ticks = line[1] - last.get(line[0], 0)
- ticks_since_last += ticks
- deltas[line[0]] = line[1] - last.get(line[0], 0)
-
- avg_freq = 0
- if ticks_since_last != 0:
- for frequency, ticks in deltas.items():
- avg_freq += frequency * ticks
- avg_freq /= ticks_since_last
-
- data[name] = avg_freq
- self.accurate_last[name] = current
- if avg_freq == 0 or ticks_since_last == 0:
- # Delta is either too large or nonexistent, fall back to
- # less accurate reading. This can happen if we switch
- # to/from the 'schedutil' governor, which doesn't report
- # stats.
- accurate_ok = False
-
- if accurate_ok:
- return data
-
- for name, paths in self.assignment.items():
- data[name] = open(paths['inaccurate'], 'r').read()
-
- return data
-
- def check(self):
- try:
- self.sys_dir = str(self.configuration['sys_dir'])
- except (KeyError, TypeError):
- self.error("No path specified. Using: '" + self.sys_dir + "'")
-
- for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'):
- path_elem = path.split('/')
- cpu = path_elem[-4]
- if cpu not in self.assignment:
- self.assignment[cpu] = {}
- self.assignment[cpu]['accurate'] = path
- self.accurate_last[cpu] = {}
-
- if len(self.assignment) == 0:
- self.accurate_exists = False
-
- for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
- path_elem = path.split('/')
- cpu = path_elem[-3]
- if cpu not in self.assignment:
- self.assignment[cpu] = {}
- self.assignment[cpu]['inaccurate'] = path
-
- if len(self.assignment) == 0:
- self.error("couldn't find a method to read cpufreq statistics")
- return False
-
- for name in sorted(self.assignment, key=lambda v: int(v[3:])):
- self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
-
- return True
-
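
To illustrate the averaging done in _get_data() above: time_in_state exposes cumulative ticks per frequency, so the deltas between two reads, weighted by frequency, give the mean clock over the interval. The numbers below are invented.

    last = {1200000: 100, 2400000: 50}       # kHz -> cumulative ticks at previous read
    current = {1200000: 130, 2400000: 90}    # kHz -> cumulative ticks now

    deltas = {freq: current[freq] - last.get(freq, 0) for freq in current}
    ticks = sum(deltas.values())
    avg_khz = sum(freq * d for freq, d in deltas.items()) / ticks if ticks else 0
    print(avg_khz / 1000.0)  # ~1885.7 MHz, matching the chart's divisor of 1000
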
diff --git a/python.d/cpuidle.chart.py b/python.d/cpuidle.chart.py
deleted file mode 100644
index d14c6aaf3..000000000
--- a/python.d/cpuidle.chart.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: cpuidle netdata python.d module
-# Author: Steven Noonan (tycho)
-
-import glob
-import os
-import platform
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-import ctypes
-syscall = ctypes.CDLL('libc.so.6').syscall
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- prefix = os.getenv('NETDATA_HOST_PREFIX', "")
- if prefix.endswith('/'):
- prefix = prefix[:-1]
- self.sys_dir = prefix + "/sys/devices/system/cpu"
- self.schedstat_path = prefix + "/proc/schedstat"
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = []
- self.definitions = {}
- self.fake_name = 'cpu'
- self.assignment = {}
- self.last_schedstat = None
-
- @staticmethod
- def __gettid():
- # This is horrendous. We need the *thread id* (not the *process id*),
- # but there's no Python standard library way of doing that. If you need
- # to enable this module on a non-x86 machine type, you'll have to find
- # the Linux syscall number for gettid() and add it to the dictionary
- # below.
- syscalls = {
- 'i386': 224,
- 'x86_64': 186,
- }
- if platform.machine() not in syscalls:
- return None
- tid = syscall(syscalls[platform.machine()])
- return tid
-
- def __wake_cpus(self, cpus):
- # Requires Python 3.3+. This will "tickle" each CPU to force it to
- # update its idle counters.
- if hasattr(os, 'sched_setaffinity'):
- pid = self.__gettid()
- save_affinity = os.sched_getaffinity(pid)
- for idx in cpus:
- os.sched_setaffinity(pid, [idx])
- os.sched_getaffinity(pid)
- os.sched_setaffinity(pid, save_affinity)
-
- def __read_schedstat(self):
- cpus = {}
- for line in open(self.schedstat_path, 'r'):
- if not line.startswith('cpu'):
- continue
- line = line.rstrip().split()
- cpu = line[0]
- active_time = line[7]
- cpus[cpu] = int(active_time) // 1000
- return cpus
-
- def _get_data(self):
- results = {}
-
- # Use the kernel scheduler stats to determine how much time was spent
- # in C0 (active).
- schedstat = self.__read_schedstat()
-
- # Determine if any of the CPUs are idle. If they are, then we need to
- # tickle them in order to update their C-state residency statistics.
- if self.last_schedstat is None:
- needs_tickle = list(self.assignment.keys())
- else:
- needs_tickle = []
- for cpu, active_time in self.last_schedstat.items():
- delta = schedstat[cpu] - active_time
- if delta < 1:
- needs_tickle.append(cpu)
-
- if needs_tickle:
- # This line is critical for the stats to update. If we don't "tickle"
- # idle CPUs, then the counters for those CPUs stop counting.
- self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle])
-
- # Re-read schedstat now that we've tickled any idlers.
- schedstat = self.__read_schedstat()
-
- self.last_schedstat = schedstat
-
- for cpu, metrics in self.assignment.items():
- update_time = schedstat[cpu]
- results[cpu + '_active_time'] = update_time
-
- for metric, path in metrics.items():
- residency = int(open(path, 'r').read())
- results[metric] = residency
-
- return results
-
- def check(self):
- if self.__gettid() is None:
- self.error("Cannot get thread ID. Stats would be completely broken.")
- return False
-
- for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
- # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
- path_elem = path.split('/')
- cpu = path_elem[-4]
- state = path_elem[-2]
- statename = open(path, 'rt').read().rstrip()
-
- orderid = '%s_cpuidle' % (cpu,)
- if orderid not in self.definitions:
- self.order.append(orderid)
- active_name = '%s_active_time' % (cpu,)
- self.definitions[orderid] = {
- 'options': [None, 'C-state residency', 'time%', 'cpuidle', 'cpuidle.cpuidle', 'stacked'],
- 'lines': [
- [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
- ],
- }
- self.assignment[cpu] = {}
-
- defid = '%s_%s_time' % (orderid, state)
-
- self.definitions[orderid]['lines'].append(
- [defid, statename, 'percentage-of-incremental-row', 1, 1]
- )
-
- self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
-
- # Sort order by kernel-specified CPU index
- self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
-
- if len(self.definitions) == 0:
- self.error("couldn't find cstate stats")
- return False
-
- return True
-
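
For reference, the /proc/schedstat field that __read_schedstat() above keys on; the sample line is shortened and invented, but field 7 after splitting (counting the 'cpuN' token as field 0) is what the module treats as nanoseconds of CPU-active time.

    line = 'cpu0 0 0 0 0 0 0 1234567890 987654321 100'
    fields = line.rstrip().split()
    cpu, active_ns = fields[0], int(fields[7])
    print(cpu, active_ns // 1000)  # cpu0 1234567  (microseconds, used as the C0 dimension)
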
diff --git a/python.d/dns_query_time.chart.py b/python.d/dns_query_time.chart.py
deleted file mode 100644
index 9a794a9c9..000000000
--- a/python.d/dns_query_time.chart.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: dns_query_time netdata python.d module
-# Author: l2isbad
-
-from random import choice
-from threading import Thread
-from socket import getaddrinfo, gaierror
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-try:
-    import dns.message, dns.query, dns.name, dns.rdatatype, dns.exception
- DNS_PYTHON = True
-except ImportError:
- DNS_PYTHON = False
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 60
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.timeout = self.configuration.get('response_timeout', 4)
- self.aggregate = self.configuration.get('aggregate', True)
- self.domains = self.configuration.get('domains')
- self.server_list = self.configuration.get('dns_servers')
-
- def check(self):
- if not DNS_PYTHON:
- self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
- return False
-
- self.timeout = self.timeout if isinstance(self.timeout, int) else 4
-
- if not all([self.domains, self.server_list,
- isinstance(self.server_list, str), isinstance(self.domains, str)]):
-            self.error('dns_servers and domains can\'t be empty')
- return False
- else:
- self.domains, self.server_list = self.domains.split(), self.server_list.split()
-
- for ns in self.server_list:
- if not check_ns(ns):
- self.info('Bad NS: %s' % ns)
- self.server_list.remove(ns)
- if not self.server_list:
- return False
-
- data = self._get_data(timeout=1)
-
- down_servers = [s for s in data if data[s] == -100]
- for down in down_servers:
- down = down[3:].replace('_', '.')
-            self.info('Removed %s due to no response' % down)
- self.server_list.remove(down)
- if not self.server_list:
- return False
-
- self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
- return True
-
- def _get_data(self, timeout=None):
- return dns_request(self.server_list, timeout or self.timeout, self.domains)
-
-
-def dns_request(server_list, timeout, domains):
- threads = list()
- que = Queue()
- result = dict()
-
- def dns_req(ns, t, q):
- domain = dns.name.from_text(choice(domains))
- request = dns.message.make_query(domain, dns.rdatatype.A)
-
- try:
- dns_start = time()
- dns.query.udp(request, ns, timeout=t)
- dns_end = time()
- query_time = round((dns_end - dns_start) * 1000)
- q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
- except dns.exception.Timeout:
- q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
-
- for server in server_list:
- th = Thread(target=dns_req, args=(server, timeout, que))
- th.start()
- threads.append(th)
-
- for th in threads:
- th.join()
- result.update(que.get())
-
- return result
-
-
-def check_ns(ns):
- try:
- return getaddrinfo(ns, 'domain')[0][4][0]
- except gaierror:
- return False
-
-
-def create_charts(aggregate, server_list):
- if aggregate:
- order = ['dns_group']
- definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
- 'dns_query_time.response_time', 'line'], 'lines': []}}
- for ns in server_list:
- definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
-
- return order, definitions
- else:
- order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
- definitions = dict()
- for ns in server_list:
- definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
- 'dns_query_time.response_time', 'area'],
- 'lines': [['_'.join(['ns', ns.replace('.', '_')]),
- ns, 'absolute']]}
- return order, definitions
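
A standalone, hedged version of the dns_req() helper above, timing a single UDP query with dnspython; the nameserver and domain are placeholders and network access is assumed.

    from time import time

    import dns.message
    import dns.name
    import dns.query
    import dns.rdatatype

    ns, domain = '8.8.8.8', 'example.com'          # placeholders
    request = dns.message.make_query(dns.name.from_text(domain), dns.rdatatype.A)
    start = time()
    dns.query.udp(request, ns, timeout=4)
    print('ns_%s' % ns.replace('.', '_'), round((time() - start) * 1000), 'ms')
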
diff --git a/python.d/dnsdist.chart.py b/python.d/dnsdist.chart.py
deleted file mode 100644
index b40112cbc..000000000
--- a/python.d/dnsdist.chart.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# -*- coding: utf-8 -*-
-from json import loads
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = ['queries', 'queries_dropped', 'packets_dropped', 'answers', 'backend_responses', 'backend_commerrors', 'backend_errors', 'cache', 'servercpu', 'servermem', 'query_latency', 'query_latency_avg']
-CHARTS = {
- 'queries': {
- 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
- 'lines': [
- ['queries', 'all', 'incremental'],
- ['rdqueries', 'recursive', 'incremental'],
- ['empty-queries', 'empty', 'incremental']
- ]},
- 'queries_dropped': {
- 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
- 'lines': [
- ['rule-drop', 'rule drop', 'incremental'],
- ['dyn-blocked', 'dynamic block', 'incremental'],
- ['no-policy', 'no policy', 'incremental'],
- ['noncompliant-queries', 'non compliant', 'incremental']
- ]},
- 'packets_dropped': {
- 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
- 'lines': [
- ['acl-drops', 'acl', 'incremental']
- ]},
- 'answers': {
- 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
- 'lines': [
- ['self-answered', 'self answered', 'incremental'],
- ['rule-nxdomain', 'nxdomain', 'incremental', -1],
- ['rule-refused', 'refused', 'incremental', -1],
- ['trunc-failures', 'trunc failures', 'incremental', -1]
- ]},
- 'backend_responses': {
- 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
- 'lines': [
- ['responses', 'responses', 'incremental']
- ]},
- 'backend_commerrors': {
- 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
- 'lines': [
- ['downstream-send-errors', 'send errors', 'incremental']
- ]},
- 'backend_errors': {
- 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
- 'lines': [
- ['downstream-timeouts', 'timeout', 'incremental'],
- ['servfail-responses', 'servfail', 'incremental'],
- ['noncompliant-responses', 'non compliant', 'incremental']
- ]},
- 'cache': {
- 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
- 'lines': [
- ['cache-hits', 'hits', 'incremental'],
- ['cache-misses', 'misses', 'incremental', -1]
- ]},
- 'servercpu': {
- 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
- 'lines': [
- ['cpu-sys-msec', 'system state', 'incremental'],
- ['cpu-user-msec', 'user state', 'incremental']
- ]},
- 'servermem': {
- 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
- 'lines': [
- ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
- ]},
- 'query_latency': {
- 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
- 'lines': [
- ['latency0-1', '1ms', 'incremental'],
- ['latency1-10', '10ms', 'incremental'],
- ['latency10-50', '50ms', 'incremental'],
- ['latency50-100', '100ms', 'incremental'],
- ['latency100-1000', '1sec', 'incremental'],
- ['latency-slow', 'slow', 'incremental']
- ]},
- 'query_latency_avg': {
- 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency', 'dnsdist.query_latency_avg', 'line'],
- 'lines': [
- ['latency-avg100', '100', 'absolute'],
- ['latency-avg1000', '1k', 'absolute'],
- ['latency-avg10000', '10k', 'absolute'],
- ['latency-avg1000000', '1000k', 'absolute']
- ]}
-}
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
-
- return loads(data)
-
diff --git a/python.d/dovecot.chart.py b/python.d/dovecot.chart.py
deleted file mode 100644
index 5689f2ec9..000000000
--- a/python.d/dovecot.chart.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: dovecot netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['sessions', 'logins', 'commands',
- 'faults',
- 'context_switches',
- 'io', 'net', 'syscalls',
- 'lookup', 'cache',
- 'auth', 'auth_cache']
-
-CHARTS = {
- 'sessions': {
- 'options': [None, "Dovecot Active Sessions", 'number', 'sessions', 'dovecot.sessions', 'line'],
- 'lines': [
- ['num_connected_sessions', 'active sessions', 'absolute']
- ]},
- 'logins': {
- 'options': [None, "Dovecot Logins", 'number', 'logins', 'dovecot.logins', 'line'],
- 'lines': [
- ['num_logins', 'logins', 'absolute']
- ]},
- 'commands': {
- 'options': [None, "Dovecot Commands", "commands", 'commands', 'dovecot.commands', 'line'],
- 'lines': [
- ['num_cmds', 'commands', 'absolute']
- ]},
- 'faults': {
- 'options': [None, "Dovecot Page Faults", "faults", 'page faults', 'dovecot.faults', 'line'],
- 'lines': [
- ['min_faults', 'minor', 'absolute'],
- ['maj_faults', 'major', 'absolute']
- ]},
- 'context_switches': {
- 'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'],
- 'lines': [
- ['vol_cs', 'voluntary', 'absolute'],
- ['invol_cs', 'involuntary', 'absolute']
- ]},
- 'io': {
- 'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
- 'lines': [
- ['disk_input', 'read', 'incremental', 1, 1024],
- ['disk_output', 'write', 'incremental', -1, 1024]
- ]},
- 'net': {
- 'options': [None, "Dovecot Network Bandwidth", 'kilobits/s', 'network', 'dovecot.net', 'area'],
- 'lines': [
- ['read_bytes', 'read', 'incremental', 8, 1024],
- ['write_bytes', 'write', 'incremental', -8, 1024]
- ]},
- 'syscalls': {
- 'options': [None, "Dovecot Number of SysCalls", 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
- 'lines': [
- ['read_count', 'read', 'incremental'],
- ['write_count', 'write', 'incremental']
- ]},
- 'lookup': {
- 'options': [None, "Dovecot Lookups", 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
- 'lines': [
- ['mail_lookup_path', 'path', 'incremental'],
- ['mail_lookup_attr', 'attr', 'incremental']
- ]},
- 'cache': {
- 'options': [None, "Dovecot Cache Hits", 'hits/s', 'cache', 'dovecot.cache', 'line'],
- 'lines': [
- ['mail_cache_hits', 'hits', 'incremental']
- ]},
- 'auth': {
- 'options': [None, "Dovecot Authentications", 'attempts', 'logins', 'dovecot.auth', 'stacked'],
- 'lines': [
- ['auth_successes', 'ok', 'absolute'],
- ['auth_failures', 'failed', 'absolute']
- ]},
- 'auth_cache': {
- 'options': [None, "Dovecot Authentication Cache", 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
- 'lines': [
- ['auth_cache_hits', 'hit', 'absolute'],
- ['auth_cache_misses', 'miss', 'absolute']
- ]}
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "EXPORT\tglobal\r\n"
- self.host = None # localhost
- self.port = None # 24242
- # self._keep_alive = True
- self.unix_socket = "/var/run/dovecot/stats"
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from socket
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- except (ValueError, AttributeError):
- return None
-
- if raw is None:
- self.debug("dovecot returned no data")
- return None
-
- data = raw.split('\n')[:2]
- desc = data[0].split('\t')
- vals = data[1].split('\t')
- ret = dict()
- for i, _ in enumerate(desc):
- try:
- ret[str(desc[i])] = int(vals[i])
- except ValueError:
- continue
- return ret or None
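
To make the parsing in _get_data() above concrete: the dovecot stats socket answers the EXPORT request with one tab-separated header line and one value line. The reply below is a shortened, invented example.

    raw = 'num_logins\tnum_cmds\tdisk_input\n42\t1337\t204800\n'
    desc, vals = (line.split('\t') for line in raw.split('\n')[:2])
    print({key: int(val) for key, val in zip(desc, vals)})
    # {'num_logins': 42, 'num_cmds': 1337, 'disk_input': 204800}
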
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
deleted file mode 100644
index 9c2c58944..000000000
--- a/python.d/elasticsearch.chart.py
+++ /dev/null
@@ -1,554 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: elastic search node stats netdata python.d module
-# Author: l2isbad
-
-from collections import namedtuple
-from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
-
-NODE_STATS = [
- 'indices.search.fetch_current',
- 'indices.search.fetch_total',
- 'indices.search.query_current',
- 'indices.search.query_total',
- 'indices.search.query_time_in_millis',
- 'indices.search.fetch_time_in_millis',
- 'indices.indexing.index_total',
- 'indices.indexing.index_current',
- 'indices.indexing.index_time_in_millis',
- 'indices.refresh.total',
- 'indices.refresh.total_time_in_millis',
- 'indices.flush.total',
- 'indices.flush.total_time_in_millis',
- 'indices.translog.operations',
- 'indices.translog.size_in_bytes',
- 'indices.translog.uncommitted_operations',
- 'indices.translog.uncommitted_size_in_bytes',
- 'indices.segments.count',
- 'indices.segments.terms_memory_in_bytes',
- 'indices.segments.stored_fields_memory_in_bytes',
- 'indices.segments.term_vectors_memory_in_bytes',
- 'indices.segments.norms_memory_in_bytes',
- 'indices.segments.points_memory_in_bytes',
- 'indices.segments.doc_values_memory_in_bytes',
- 'indices.segments.index_writer_memory_in_bytes',
- 'indices.segments.version_map_memory_in_bytes',
- 'indices.segments.fixed_bit_set_memory_in_bytes',
- 'jvm.gc.collectors.young.collection_count',
- 'jvm.gc.collectors.old.collection_count',
- 'jvm.gc.collectors.young.collection_time_in_millis',
- 'jvm.gc.collectors.old.collection_time_in_millis',
- 'jvm.mem.heap_used_percent',
- 'jvm.mem.heap_used_in_bytes',
- 'jvm.mem.heap_committed_in_bytes',
- 'jvm.buffer_pools.direct.count',
- 'jvm.buffer_pools.direct.used_in_bytes',
- 'jvm.buffer_pools.direct.total_capacity_in_bytes',
- 'jvm.buffer_pools.mapped.count',
- 'jvm.buffer_pools.mapped.used_in_bytes',
- 'jvm.buffer_pools.mapped.total_capacity_in_bytes',
- 'thread_pool.bulk.queue',
- 'thread_pool.bulk.rejected',
- 'thread_pool.index.queue',
- 'thread_pool.index.rejected',
- 'thread_pool.search.queue',
- 'thread_pool.search.rejected',
- 'thread_pool.merge.queue',
- 'thread_pool.merge.rejected',
- 'indices.fielddata.memory_size_in_bytes',
- 'indices.fielddata.evictions',
- 'breakers.fielddata.tripped',
- 'http.current_open',
- 'transport.rx_size_in_bytes',
- 'transport.tx_size_in_bytes',
- 'process.max_file_descriptors',
- 'process.open_file_descriptors'
-]
-
-CLUSTER_STATS = [
- 'nodes.count.data_only',
- 'nodes.count.master_data',
- 'nodes.count.total',
- 'nodes.count.master_only',
- 'nodes.count.client',
- 'indices.docs.count',
- 'indices.query_cache.hit_count',
- 'indices.query_cache.miss_count',
- 'indices.store.size_in_bytes',
- 'indices.count',
- 'indices.shards.total'
-]
-
-HEALTH_STATS = [
- 'number_of_nodes',
- 'number_of_data_nodes',
- 'number_of_pending_tasks',
- 'number_of_in_flight_fetch',
- 'active_shards',
- 'relocating_shards',
- 'unassigned_shards',
- 'delayed_unassigned_shards',
- 'initializing_shards',
- 'active_shards_percent_as_number'
-]
-
-LATENCY = {
- 'query_latency':
- {'total': 'indices_search_query_total',
- 'spent_time': 'indices_search_query_time_in_millis'},
- 'fetch_latency':
- {'total': 'indices_search_fetch_total',
- 'spent_time': 'indices_search_fetch_time_in_millis'},
- 'indexing_latency':
- {'total': 'indices_indexing_index_total',
- 'spent_time': 'indices_indexing_index_time_in_millis'},
- 'flushing_latency':
- {'total': 'indices_flush_total',
- 'spent_time': 'indices_flush_total_time_in_millis'}
-}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['search_performance_total', 'search_performance_current', 'search_performance_time',
- 'search_latency', 'index_performance_total', 'index_performance_current', 'index_performance_time',
- 'index_latency', 'index_translog_operations', 'index_translog_size', 'index_segments_count', 'index_segments_memory_writer',
- 'index_segments_memory', 'jvm_mem_heap', 'jvm_mem_heap_bytes', 'jvm_buffer_pool_count',
- 'jvm_direct_buffers_memory', 'jvm_mapped_buffers_memory', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
- 'host_metrics_http', 'host_metrics_transport', 'thread_pool_queued', 'thread_pool_rejected',
- 'fielddata_cache', 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes',
- 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs',
- 'cluster_stats_store', 'cluster_stats_indices_shards']
-
-CHARTS = {
- 'search_performance_total': {
- 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
- 'elastic.search_performance_total', 'stacked'],
- 'lines': [
- ['indices_search_query_total', 'queries', 'incremental'],
- ['indices_search_fetch_total', 'fetches', 'incremental']
- ]},
- 'search_performance_current': {
- 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
- 'elastic.search_performance_current', 'stacked'],
- 'lines': [
- ['indices_search_query_current', 'queries', 'absolute'],
- ['indices_search_fetch_current', 'fetches', 'absolute']
- ]},
- 'search_performance_time': {
- 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
- 'elastic.search_performance_time', 'stacked'],
- 'lines': [
- ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
- ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
- ]},
- 'search_latency': {
- 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
- 'lines': [
- ['query_latency', 'query', 'absolute', 1, 1000],
- ['fetch_latency', 'fetch', 'absolute', 1, 1000]
- ]},
- 'index_performance_total': {
- 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
- 'indexing performance', 'elastic.index_performance_total', 'stacked'],
- 'lines': [
- ['indices_indexing_index_total', 'indexed', 'incremental'],
- ['indices_refresh_total', 'refreshes', 'incremental'],
- ['indices_flush_total', 'flushes', 'incremental']
- ]},
- 'index_performance_current': {
- 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
- 'indexing performance', 'elastic.index_performance_current', 'stacked'],
- 'lines': [
- ['indices_indexing_index_current', 'documents', 'absolute']
- ]},
- 'index_performance_time': {
- 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
- 'elastic.index_performance_time', 'stacked'],
- 'lines': [
- ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
- ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
- ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
- ]},
- 'index_latency': {
- 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
- 'elastic.index_latency', 'stacked'],
- 'lines': [
- ['indexing_latency', 'indexing', 'absolute', 1, 1000],
- ['flushing_latency', 'flushing', 'absolute', 1, 1000]
- ]},
- 'index_translog_operations': {
- 'options': [None, 'Translog Operations', 'count', 'translog',
- 'elastic.index_translog_operations', 'area'],
- 'lines': [
- ['indices_translog_operations', 'total', 'absolute'],
- ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
- ]},
- 'index_translog_size': {
- 'options': [None, 'Translog Size', 'MB', 'translog',
- 'elastic.index_translog_size', 'area'],
- 'lines': [
- ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
- ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
- ]},
- 'index_segments_count': {
- 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
- 'elastic.index_segments_count', 'line'],
- 'lines': [
- ['indices_segments_count', 'segments', 'absolute']
- ]},
- 'index_segments_memory_writer': {
- 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
- 'elastic.index_segments_memory_writer', 'area'],
- 'lines': [
- ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
- ]},
- 'index_segments_memory': {
- 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
- 'elastic.index_segments_memory', 'stacked'],
- 'lines': [
- ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
- ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
- ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
- ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
- ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
- ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
- ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
- ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
- ]},
- 'jvm_mem_heap': {
- 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
- 'elastic.jvm_heap', 'area'],
- 'lines': [
- ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
- ]},
- 'jvm_mem_heap_bytes': {
- 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
- 'elastic.jvm_heap_bytes', 'area'],
- 'lines': [
- ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
- ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
- ]},
- 'jvm_buffer_pool_count': {
- 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
- 'elastic.jvm_buffer_pool_count', 'line'],
- 'lines': [
- ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
- ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
- ]},
- 'jvm_direct_buffers_memory': {
- 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
- 'elastic.jvm_direct_buffers_memory', 'area'],
- 'lines': [
- ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
- ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]},
- 'jvm_mapped_buffers_memory': {
- 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
- 'elastic.jvm_mapped_buffers_memory', 'area'],
- 'lines': [
- ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
- ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]},
- 'jvm_gc_count': {
- 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
- 'lines': [
- ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
- ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
- ]},
- 'jvm_gc_time': {
- 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
- 'elastic.gc_time', 'stacked'],
- 'lines': [
- ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
- ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
- ]},
- 'thread_pool_queued': {
- 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
- 'elastic.thread_pool_queued', 'stacked'],
- 'lines': [
- ['thread_pool_bulk_queue', 'bulk', 'absolute'],
- ['thread_pool_index_queue', 'index', 'absolute'],
- ['thread_pool_search_queue', 'search', 'absolute'],
- ['thread_pool_merge_queue', 'merge', 'absolute']
- ]},
- 'thread_pool_rejected': {
- 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
- 'elastic.thread_pool_rejected', 'stacked'],
- 'lines': [
- ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
- ['thread_pool_index_rejected', 'index', 'absolute'],
- ['thread_pool_search_rejected', 'search', 'absolute'],
- ['thread_pool_merge_rejected', 'merge', 'absolute']
- ]},
- 'fielddata_cache': {
- 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
- 'lines': [
- ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
- ]},
- 'fielddata_evictions_tripped': {
- 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
- 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
- 'lines': [
- ['indices_fielddata_evictions', 'evictions', 'incremental'],
- ['indices_fielddata_tripped', 'tripped', 'incremental']
- ]},
- 'cluster_health_nodes': {
- 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
- 'elastic.cluster_health_nodes', 'stacked'],
- 'lines': [
- ['number_of_nodes', 'nodes', 'absolute'],
- ['number_of_data_nodes', 'data_nodes', 'absolute'],
- ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
- ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
- ]},
- 'cluster_health_status': {
- 'options': [None, 'Cluster Status', 'status', 'cluster health API',
- 'elastic.cluster_health_status', 'area'],
- 'lines': [
- ['status_green', 'green', 'absolute'],
- ['status_red', 'red', 'absolute'],
- ['status_foo1', None, 'absolute'],
- ['status_foo2', None, 'absolute'],
- ['status_foo3', None, 'absolute'],
- ['status_yellow', 'yellow', 'absolute']
- ]},
- 'cluster_health_shards': {
- 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
- 'elastic.cluster_health_shards', 'stacked'],
- 'lines': [
- ['active_shards', 'active_shards', 'absolute'],
- ['relocating_shards', 'relocating_shards', 'absolute'],
- ['unassigned_shards', 'unassigned', 'absolute'],
- ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
- ['initializing_shards', 'initializing', 'absolute'],
- ['active_shards_percent_as_number', 'active_percent', 'absolute']
- ]},
- 'cluster_stats_nodes': {
- 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
- 'elastic.cluster_nodes', 'stacked'],
- 'lines': [
- ['nodes_count_data_only', 'data_only', 'absolute'],
- ['nodes_count_master_data', 'master_data', 'absolute'],
- ['nodes_count_total', 'total', 'absolute'],
- ['nodes_count_master_only', 'master_only', 'absolute'],
- ['nodes_count_client', 'client', 'absolute']
- ]},
- 'cluster_stats_query_cache': {
- 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
- 'elastic.cluster_query_cache', 'stacked'],
- 'lines': [
- ['indices_query_cache_hit_count', 'hit', 'incremental'],
- ['indices_query_cache_miss_count', 'miss', 'incremental']
- ]},
- 'cluster_stats_docs': {
- 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
- 'elastic.cluster_docs', 'line'],
- 'lines': [
- ['indices_docs_count', 'docs', 'absolute']
- ]},
- 'cluster_stats_store': {
- 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
- 'elastic.cluster_store', 'line'],
- 'lines': [
- ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
- ]},
- 'cluster_stats_indices_shards': {
- 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
- 'elastic.cluster_indices_shards', 'stacked'],
- 'lines': [
- ['indices_count', 'indices', 'absolute'],
- ['indices_shards_total', 'shards', 'absolute']
- ]},
- 'host_metrics_transport': {
- 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
- 'elastic.host_transport', 'area'],
- 'lines': [
- ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
- ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
- ]},
- 'host_metrics_file_descriptors': {
- 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
- 'elastic.host_descriptors', 'area'],
- 'lines': [
- ['file_descriptors_used', 'used', 'absolute', 1, 10]
- ]},
- 'host_metrics_http': {
- 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
- 'elastic.host_http_connections', 'line'],
- 'lines': [
- ['http_current_open', 'opened', 'absolute', 1, 1]
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host')
- self.port = self.configuration.get('port', 9200)
- self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
- host=self.host,
- port=self.port)
- self.latency = dict()
- self.methods = list()
-
- def check(self):
- if not all([self.host,
- self.port,
- isinstance(self.host, str),
- isinstance(self.port, (str, int))]):
- self.error('Host is not defined or has an invalid type in the module configuration file')
- return False
-
- # Hostname -> ip address
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
-
- # Create URL for every Elasticsearch API
- self.methods = [METHODS(get_data=self._get_node_stats,
- url=self.url + '/_nodes/_local/stats',
- run=self.configuration.get('node_stats', True)),
- METHODS(get_data=self._get_cluster_health,
- url=self.url + '/_cluster/health',
- run=self.configuration.get('cluster_health', True)),
- METHODS(get_data=self._get_cluster_stats,
- url=self.url + '/_cluster/stats',
- run=self.configuration.get('cluster_stats', True))]
-
- # Disabled API calls are skipped in _get_data() via the 'run' flag
- return UrlService.check(self)
-
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
-
- for method in self.methods:
- if not method.run:
- continue
- th = Thread(target=method.get_data,
- args=(queue, method.url))
- th.start()
- threads.append(th)
-
- for thread in threads:
- thread.join()
- result.update(queue.get())
-
- return result or None
-
- def _get_cluster_health(self, queue, url):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data(url)
-
- if not raw_data:
- return queue.put(dict())
-
- data = loads(raw_data)
- to_netdata = fetch_data_(raw_data=data,
- metrics=HEALTH_STATS)
-
- to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
- 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
- current_status = 'status_' + data['status']
- to_netdata[current_status] = 1
-
- return queue.put(to_netdata)
-
- def _get_cluster_stats(self, queue, url):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data(url)
-
- if not raw_data:
- return queue.put(dict())
-
- data = loads(raw_data)
- to_netdata = fetch_data_(raw_data=data,
- metrics=CLUSTER_STATS)
-
- return queue.put(to_netdata)
-
- def _get_node_stats(self, queue, url):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data(url)
-
- if not raw_data:
- return queue.put(dict())
-
- data = loads(raw_data)
-
- node = list(data['nodes'].keys())[0]
- to_netdata = fetch_data_(raw_data=data['nodes'][node],
- metrics=NODE_STATS)
-
- # Search, index, flush, fetch performance latency
- for key in LATENCY:
- try:
- to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']],
- spent_time=to_netdata[LATENCY[key]['spent_time']],
- key=key)
- except KeyError:
- continue
- if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata:
- to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors'])
- / to_netdata['process_max_file_descriptors'] * 1000)
-
- return queue.put(to_netdata)
-
- def find_avg(self, total, spent_time, key):
- if key not in self.latency:
- self.latency[key] = dict(total=total,
- spent_time=spent_time)
- return 0
- if self.latency[key]['total'] != total:
- latency = float(spent_time - self.latency[key]['spent_time'])\
- / float(total - self.latency[key]['total']) * 1000
- self.latency[key]['total'] = total
- self.latency[key]['spent_time'] = spent_time
- return latency
- self.latency[key]['spent_time'] = spent_time
- return 0
-
-
-def fetch_data_(raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError:
- continue
- data['_'.join(metrics_list)] = value
- return data
diff --git a/python.d/example.chart.py b/python.d/example.chart.py
deleted file mode 100644
index ee7ff62fc..000000000
--- a/python.d/example.chart.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: example netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from random import SystemRandom
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values
-# update_every = 4
-priority = 90000
-retries = 60
-
-ORDER = ['random']
-CHARTS = {
- 'random': {
- 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
- 'lines': [
- ['random1']
- ]
- }
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.random = SystemRandom()
-
- @staticmethod
- def check():
- return True
-
- def get_data(self):
- data = dict()
-
- for i in range(1, 4):
- dimension_id = ''.join(['random', str(i)])
-
- if dimension_id not in self.charts['random']:
- self.charts['random'].add_dimension([dimension_id])
-
- data[dimension_id] = self.random.randint(0, 100)
-
- return data
diff --git a/python.d/exim.chart.py b/python.d/exim.chart.py
deleted file mode 100644
index 2e5b924ba..000000000
--- a/python.d/exim.chart.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: exim netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails']
-
-CHARTS = {
- 'qemails': {
- 'options': [None, "Exim Queue Emails", "emails", 'queue', 'exim.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]}
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "exim -bpc"
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- return {'emails': int(self._get_raw_data()[0])}
- except (ValueError, AttributeError):
- return None
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
deleted file mode 100644
index 895833f87..000000000
--- a/python.d/fail2ban.chart.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: fail2ban log netdata python.d module
-# Author: l2isbad
-
-import bisect
-
-from glob import glob
-from re import compile as r_compile
-from os import access as is_accessible, R_OK
-from os.path import isdir, getsize
-
-
-from bases.FrameworkServices.LogService import LogService
-
-priority = 60000
-retries = 60
-REGEX_JAILS = r_compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
-REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>U|B)[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
-ORDER = ['jails_bans', 'jails_in_jail']
-
-
-class Service(LogService):
- """
- fail2ban log class
- Reads logs line by line
- Jail auto detection included
- It produces following charts:
- * Bans per second for every jail
- * Banned IPs for every jail (since the last restart of netdata)
- """
- def __init__(self, configuration=None, name=None):
- LogService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = dict()
- self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
- self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
- self.exclude = self.configuration.get('exclude')
-
- def _get_data(self):
- """
- Parse new log lines
- :return: dict
- """
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return self.to_netdata
-
- # Fail2ban log lines look like:
- # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
- for row in raw:
- match = REGEX_DATA.search(row)
- if match:
- match_dict = match.groupdict()
- jail, action, ipaddr = match_dict['jail'], match_dict['action'], match_dict['ipaddr']
- if jail in self.jails_list:
- if action == 'B':
- self.to_netdata[jail] += 1
- if address_not_in_jail(self.banned_ips[jail], ipaddr, self.to_netdata[jail + '_in_jail']):
- self.to_netdata[jail + '_in_jail'] += 1
- else:
- if ipaddr in self.banned_ips[jail]:
- self.banned_ips[jail].remove(ipaddr)
- self.to_netdata[jail + '_in_jail'] -= 1
-
- return self.to_netdata
-
- def check(self):
- """
- :return: bool
-
- Check that the "log_path" file is readable and not empty
- """
-
- if not (is_accessible(self.log_path, R_OK) and getsize(self.log_path) != 0):
- self.error('%s is not readable or empty' % self.log_path)
- return False
- self.jails_list, self.to_netdata, self.banned_ips = self.jails_auto_detection_()
- self.definitions = create_definitions_(self.jails_list)
- self.info('Jails: %s' % self.jails_list)
- return True
-
- def jails_auto_detection_(self):
- """
- return: <tuple>
-
- * jails_list - list of enabled jails (['ssh', 'apache', ...])
- * to_netdata - dict ({'ssh': 0, 'ssh_in_jail': 0, ...})
- * banned_ips - here will be stored all the banned ips ({'ssh': ['1.2.3.4', '5.6.7.8', ...], ...})
- """
- raw_jails_list = list()
- jails_list = list()
-
- for raw_jail in parse_configuration_files_(self.conf_path, self.conf_dir, self.error):
- raw_jails_list.extend(raw_jail)
-
- for jail, status in raw_jails_list:
- if status == 'true' and jail not in jails_list:
- jails_list.append(jail)
- elif status == 'false' and jail in jails_list:
- jails_list.remove(jail)
- # If parsing failed for some reason, we can still start with the default jails_list.
- jails_list = list(set(jails_list) - set(self.exclude.split()
- if isinstance(self.exclude, str) else list())) or ['ssh']
-
- to_netdata = dict([(jail, 0) for jail in jails_list])
- to_netdata.update(dict([(jail + '_in_jail', 0) for jail in jails_list]))
- banned_ips = dict([(jail, list()) for jail in jails_list])
-
- return jails_list, to_netdata, banned_ips
-
-
-def create_definitions_(jails_list):
- """
- Chart definitions creating
- """
-
- definitions = {
- 'jails_bans': {'options': [None, 'Jails Ban Statistics', 'bans/s', 'bans', 'jail.bans', 'line'],
- 'lines': []},
- 'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
- 'in jail', 'jail.in_jail', 'line'],
- 'lines': []}}
- for jail in jails_list:
- definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
- definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
-
- return definitions
-
-
-def parse_configuration_files_(jails_conf_path, jails_conf_dir, print_error):
- """
- :param jails_conf_path: <str>
- :param jails_conf_dir: <str>
- :param print_error: <function>
- :return: <tuple>
-
- Uses "find_jails_in_files" function to find all jails in the "jails_conf_dir" directory
- and in the "jails_conf_path"
-
- All files must end with ".local" or ".conf"
- Return order is important.
- According to man jail.conf, it should be:
- * jail.conf
- * jail.d/*.conf (in alphabetical order)
- * jail.local
- * jail.d/*.local (in alphabetical order)
- """
- path_conf, path_local, dir_conf, dir_local = list(), list(), list(), list()
-
- # Parse files in the directory
- if not (isinstance(jails_conf_dir, str) and isdir(jails_conf_dir)):
- print_error('%s is not a directory' % jails_conf_dir)
- else:
- dir_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(jails_conf_dir + '/*.conf')))
- dir_local = list(filter(lambda local: is_accessible(local, R_OK), glob(jails_conf_dir + '/*.local')))
- if not (dir_conf or dir_local):
- print_error('%s is empty or not readable' % jails_conf_dir)
- else:
- dir_conf, dir_local = (find_jails_in_files(dir_conf, print_error),
- find_jails_in_files(dir_local, print_error))
-
- # Parse .conf and .local files
- if isinstance(jails_conf_path, str) and jails_conf_path.endswith(('.local', '.conf')):
- path_conf, path_local = (find_jails_in_files([jails_conf_path.split('.')[0] + '.conf'], print_error),
- find_jails_in_files([jails_conf_path.split('.')[0] + '.local'], print_error))
-
- return path_conf, dir_conf, path_local, dir_local
-
-
-def find_jails_in_files(list_of_files, print_error):
- """
- :param list_of_files: <list>
- :param print_error: <function>
- :return: <list>
-
- Open a file and parse it to find all (enabled and disabled) jails
- The output is a list of tuples:
- [('ssh', 'true'), ('apache', 'false'), ...]
- """
- jails_list = list()
- for conf in list_of_files:
- if is_accessible(conf, R_OK):
- with open(conf, 'rt') as f:
- raw_data = f.readlines()
- data = ' '.join(line for line in raw_data if line.startswith(('[', 'enabled')))
- jails_list.extend(REGEX_JAILS.findall(data))
- else:
- print_error('%s does not exist or is not readable' % conf)
- return jails_list
-
-
-def address_not_in_jail(pool, address, pool_size):
- """
- :param pool: <list>
- :param address: <str>
- :param pool_size: <int>
- :return: bool
-
- Checks whether the address is already in the pool.
- If it is not, the address is added
- """
- index = bisect.bisect_left(pool, address)
- if index < pool_size:
- if pool[index] == address:
- return False
- bisect.insort_left(pool, address)
- return True
- else:
- bisect.insort_left(pool, address)
- return True
diff --git a/python.d/freeradius.chart.py b/python.d/freeradius.chart.py
deleted file mode 100644
index 3acc58d1a..000000000
--- a/python.d/freeradius.chart.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: freeradius netdata python.d module
-# Author: l2isbad
-
-from re import findall
-from subprocess import Popen, PIPE
-
-from bases.collection import find_binary
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
-update_every = 15
-
-RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
-
-CHARTS = {
- 'authentication': {
- 'options': [None, "Authentication", "packets/s", 'Authentication', 'freerad.auth', 'line'],
- 'lines': [
- ['access-accepts', None, 'incremental'],
- ['access-rejects', None, 'incremental'],
- ['auth-dropped-requests', 'dropped-requests', 'incremental'],
- ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['auth-invalid-requests', 'invalid-requests', 'incremental'],
- ['auth-malformed-requests', 'malformed-requests', 'incremental'],
- ['auth-unknown-types', 'unknown-types', 'incremental']
- ]},
- 'accounting': {
- 'options': [None, "Accounting", "packets/s", 'Accounting', 'freerad.acct', 'line'],
- 'lines': [
- ['accounting-requests', 'requests', 'incremental'],
- ['accounting-responses', 'responses', 'incremental'],
- ['acct-dropped-requests', 'dropped-requests', 'incremental'],
- ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['acct-invalid-requests', 'invalid-requests', 'incremental'],
- ['acct-malformed-requests', 'malformed-requests', 'incremental'],
- ['acct-unknown-types', 'unknown-types', 'incremental']
- ]},
- 'proxy-auth': {
- 'options': [None, "Proxy Authentication", "packets/s", 'Authentication', 'freerad.proxy.auth', 'line'],
- 'lines': [
- ['proxy-access-accepts', 'access-accepts', 'incremental'],
- ['proxy-access-rejects', 'access-rejects', 'incremental'],
- ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
- ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
- ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
- ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
- ]},
- 'proxy-acct': {
- 'options': [None, "Proxy Accounting", "packets/s", 'Accounting', 'freerad.proxy.acct', 'line'],
- 'lines': [
- ['proxy-accounting-requests', 'requests', 'incremental'],
- ['proxy-accounting-responses', 'responses', 'incremental'],
- ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
- ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
- ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
- ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
- ]}
-
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', '18121')
- self.secret = self.configuration.get('secret')
- self.acct = self.configuration.get('acct', False)
- self.proxy_auth = self.configuration.get('proxy_auth', False)
- self.proxy_acct = self.configuration.get('proxy_acct', False)
- chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
- self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
- self.echo = find_binary('echo')
- self.radclient = find_binary('radclient')
- self.sub_echo = [self.echo, RADIUS_MSG]
- self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
- ':'.join([self.host, self.port]), 'status', self.secret]
-
- def check(self):
- if not all([self.echo, self.radclient]):
- self.error('Can\'t locate "radclient" or "echo" binary, or the binary is not executable by netdata')
- return False
- if not self.secret:
- self.error('"secret" not set')
- return False
-
- if self._get_raw_data():
- return True
- self.error('Request returned no data. Is server alive?')
- return False
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- result = self._get_raw_data()
- return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
-
- def _get_raw_data(self):
- """
- The following code is equivalent to
- 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
- | radclient -t 1 -r 1 host:port status secret'
- :return: str
- """
- try:
- process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
- process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
- process_echo.stdout.close()
- raw_result = process_rad.communicate()[0]
- except OSError:
- return None
- if process_rad.returncode == 0:
- return raw_result.decode()
- return None
diff --git a/python.d/go_expvar.chart.py b/python.d/go_expvar.chart.py
deleted file mode 100644
index cbd462570..000000000
--- a/python.d/go_expvar.chart.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: go_expvar netdata python.d module
-# Author: Jan Kral (kralewitz)
-
-from __future__ import division
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-
-MEMSTATS_CHARTS = {
- 'memstats_heap': {
- 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
- 'expvar.memstats.heap', 'line'],
- 'lines': [
- ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
- ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
- 'memstats_stack': {
- 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
- 'expvar.memstats.stack', 'line'],
- 'lines': [
- ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
- 'memstats_mspan': {
- 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
- 'expvar.memstats.mspan', 'line'],
- 'lines': [
- ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
- 'memstats_mcache': {
- 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
- 'expvar.memstats.mcache', 'line'],
- 'lines': [
- ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
- 'memstats_live_objects': {
- 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
- 'expvar.memstats.live_objects', 'line'],
- 'lines': [
- ['memstats_live_objects', 'live']
- ]},
- 'memstats_sys': {
- 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
- 'expvar.memstats.sys', 'line'],
- 'lines': [
- ['memstats_sys', 'sys', 'absolute', 1, 1024]
- ]},
- 'memstats_gc_pauses': {
- 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
- 'expvar.memstats.gc_pauses', 'line'],
- 'lines': [
- ['memstats_gc_pauses', 'avg']
- ]},
-}
-
-MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
- 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
-
-
-def flatten(d, top='', sep='.'):
- items = []
- for key, val in d.items():
- nkey = top + sep + key if top else key
- if isinstance(val, dict):
- items.extend(flatten(val, nkey, sep=sep).items())
- else:
- items.append((nkey, val))
- return dict(items)
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
-
- # if memstats collection is enabled, add the charts and their order
- if self.configuration.get('collect_memstats'):
- self.definitions = dict(MEMSTATS_CHARTS)
- self.order = list(MEMSTATS_ORDER)
- else:
- self.definitions = dict()
- self.order = list()
-
- # if extra charts are defined, parse their config
- extra_charts = self.configuration.get('extra_charts')
- if extra_charts:
- self._parse_extra_charts_config(extra_charts)
-
- def check(self):
- """
- Check if the module can collect data:
- 1) At least one JOB configuration has to be specified
- 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
- extra_chart must be defined.
-
- The configuration and URL check is provided by the UrlService class.
- """
-
- if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
- self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
- return False
-
- return UrlService.check(self)
-
- def _parse_extra_charts_config(self, extra_charts_config):
-
- # a place to store the expvar keys and their types
- self.expvars = dict()
-
- for chart in extra_charts_config:
-
- chart_dict = dict()
- chart_id = chart.get('id')
- chart_lines = chart.get('lines')
- chart_opts = chart.get('options', dict())
-
- if not all([chart_id, chart_lines]):
- self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
- continue
-
- chart_dict['options'] = [
- chart_opts.get('name', ''),
- chart_opts.get('title', ''),
- chart_opts.get('units', ''),
- chart_opts.get('family', ''),
- chart_opts.get('context', ''),
- chart_opts.get('chart_type', 'line')
- ]
- chart_dict['lines'] = list()
-
- # add the lines to the chart
- for line in chart_lines:
-
- ev_key = line.get('expvar_key')
- ev_type = line.get('expvar_type')
- line_id = line.get('id')
-
- if not all([ev_key, ev_type, line_id]):
- self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
- continue
-
- if ev_type not in ['int', 'float']:
- self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
- continue
-
- if ev_key in self.expvars:
- self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
- continue
-
- self.expvars[ev_key] = (ev_type, line_id)
-
- chart_dict['lines'].append(
- [
- line.get('id', ''),
- line.get('name', ''),
- line.get('algorithm', ''),
- line.get('multiplier', 1),
- line.get('divisor', 100 if ev_type == 'float' else 1),
- line.get('hidden', False)
- ]
- )
-
- self.order.append(chart_id)
- self.definitions[chart_id] = chart_dict
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- data = json.loads(raw_data)
-
- expvars = dict()
- if self.configuration.get('collect_memstats'):
- expvars.update(self._parse_memstats(data))
-
- if self.configuration.get('extra_charts'):
- # the memstats part of the data has been already parsed, so we remove it before flattening and checking
- # the rest of the data, thus avoiding needless iteration over the deeply nested memstats dict.
- del (data['memstats'])
- flattened = flatten(data)
- for k, v in flattened.items():
- ev = self.expvars.get(k)
- if not ev:
- # expvar is not defined in config, skip it
- continue
- try:
- key_type, line_id = ev
- if key_type == 'int':
- expvars[line_id] = int(v)
- elif key_type == 'float':
- # if the value type is float, multiply it by 100 to match the default line divisor of 100
- expvars[line_id] = float(v) * 100
- except ValueError:
- self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
- del self.expvars[k]
-
- return expvars
-
- @staticmethod
- def _parse_memstats(data):
-
- memstats = data['memstats']
-
- # calculate the number of live objects in memory
- live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
-
- # calculate GC pause times average
- # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
- # so we need to filter out the 0 values before the buffer is filled
- gc_pauses = memstats['PauseNs']
- try:
- gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
- # no GC cycles have occurred yet
- except ZeroDivisionError:
- gc_pause_avg = 0
-
- return {
- 'memstats_heap_alloc': memstats['HeapAlloc'],
- 'memstats_heap_inuse': memstats['HeapInuse'],
- 'memstats_stack_inuse': memstats['StackInuse'],
- 'memstats_mspan_inuse': memstats['MSpanInuse'],
- 'memstats_mcache_inuse': memstats['MCacheInuse'],
- 'memstats_sys': memstats['Sys'],
- 'memstats_live_objects': live_objs,
- 'memstats_gc_pauses': gc_pause_avg,
- }
diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py
deleted file mode 100644
index 3061f5ef2..000000000
--- a/python.d/haproxy.chart.py
+++ /dev/null
@@ -1,339 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: haproxy netdata python.d module
-# Author: l2isbad, ktarasz
-
-from collections import defaultdict
-from re import compile as re_compile
-
-try:
- from urlparse import urlparse
-except ImportError:
- from urllib.parse import urlparse
-
-from bases.FrameworkServices.SocketService import SocketService
-from bases.FrameworkServices.UrlService import UrlService
-
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['fbin', 'fbout', 'fscur', 'fqcur',
- 'fhrsp_1xx', 'fhrsp_2xx', 'fhrsp_3xx', 'fhrsp_4xx', 'fhrsp_5xx', 'fhrsp_other', 'fhrsp_total',
- 'bbin', 'bbout', 'bscur', 'bqcur',
- 'bhrsp_1xx', 'bhrsp_2xx', 'bhrsp_3xx', 'bhrsp_4xx', 'bhrsp_5xx', 'bhrsp_other', 'bhrsp_total',
- 'bqtime', 'bttime', 'brtime', 'bctime',
- 'health_sup', 'health_sdown', 'health_bdown', 'health_idle']
-
-CHARTS = {
- 'fbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
- 'lines': [
- ]},
- 'fbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
- 'lines': [
- ]},
- 'fscur': {
- 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
- 'lines': [
- ]},
- 'fqcur': {
- 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
- 'lines': [
- ]},
- 'fhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
- 'lines': [
- ]},
- 'fhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
- 'lines': [
- ]},
- 'fhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
- 'lines': [
- ]},
- 'fhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
- 'lines': [
- ]},
- 'fhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
- 'lines': [
- ]},
- 'fhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'frontend', 'haproxy_f.hrsp_other', 'line'],
- 'lines': [
- ]},
- 'fhrsp_total': {
- 'options': [None, "HTTP responses", "responses", 'frontend', 'haproxy_f.hrsp_total', 'line'],
- 'lines': [
- ]},
- 'bbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
- 'lines': [
- ]},
- 'bbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
- 'lines': [
- ]},
- 'bscur': {
- 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
- 'lines': [
- ]},
- 'bqcur': {
- 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
- 'lines': [
- ]},
- 'bhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'backend', 'haproxy_b.hrsp_1xx', 'line'],
- 'lines': [
- ]},
- 'bhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'backend', 'haproxy_b.hrsp_2xx', 'line'],
- 'lines': [
- ]},
- 'bhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'backend', 'haproxy_b.hrsp_3xx', 'line'],
- 'lines': [
- ]},
- 'bhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'backend', 'haproxy_b.hrsp_4xx', 'line'],
- 'lines': [
- ]},
- 'bhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'backend', 'haproxy_b.hrsp_5xx', 'line'],
- 'lines': [
- ]},
- 'bhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'backend',
- 'haproxy_b.hrsp_other', 'line'],
- 'lines': [
- ]},
- 'bhrsp_total': {
- 'options': [None, "HTTP responses (total)", "responses/s", 'backend', 'haproxy_b.hrsp_total', 'line'],
- 'lines': [
- ]},
- 'bqtime': {
- 'options': [None, "The average queue time over the last 1024 requests", "ms", 'backend', 'haproxy_b.qtime', 'line'],
- 'lines': [
- ]},
- 'bctime': {
- 'options': [None, "The average connect time over the last 1024 requests", "ms", 'backend',
- 'haproxy_b.ctime', 'line'],
- 'lines': [
- ]},
- 'brtime': {
- 'options': [None, "The average response time over the last 1024 requests", "ms", 'backend',
- 'haproxy_b.rtime', 'line'],
- 'lines': [
- ]},
- 'bttime': {
- 'options': [None, "The average total session time over the last 1024 requests", "ms", 'backend',
- 'haproxy_b.ttime', 'line'],
- 'lines': [
- ]},
- 'health_sdown': {
- 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
- 'haproxy_hs.down', 'line'],
- 'lines': [
- ]},
- 'health_sup': {
- 'options': [None, "Backend Servers In UP State", "healthy servers", 'health',
- 'haproxy_hs.up', 'line'],
- 'lines': [
- ]},
- 'health_bdown': {
- 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
- 'lines': [
- ]},
- 'health_idle': {
- 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'],
- 'lines': [
- ['idle', None, 'absolute']
- ]}
-}
-
-
-METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
- 'bout': {'algorithm': 'incremental', 'divisor': 1024},
- 'scur': {'algorithm': 'absolute', 'divisor': 1},
- 'qcur': {'algorithm': 'absolute', 'divisor': 1},
- 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1},
- }
-
-
-BACKEND_METRICS = {
- 'qtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ctime': {'algorithm': 'absolute', 'divisor': 1},
- 'rtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ttime': {'algorithm': 'absolute', 'divisor': 1}
-}
-
-
-REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
- socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
-
-
-class Service(UrlService, SocketService):
- def __init__(self, configuration=None, name=None):
- if 'socket' in configuration:
- SocketService.__init__(self, configuration=configuration, name=name)
- self.poll = SocketService
- self.options_ = dict(regex=REGEX['socket'],
- stat='show stat\n'.encode(),
- info='show info\n'.encode())
- else:
- UrlService.__init__(self, configuration=configuration, name=name)
- self.poll = UrlService
- self.options_ = dict(regex=REGEX['url'],
- stat=self.url,
- info=url_remove_params(self.url))
- self.order = ORDER
- self.definitions = CHARTS
-
- def check(self):
- if self.poll.check(self):
- self.create_charts()
- self.info('We are using %s.' % self.poll.__name__)
- return True
- return False
-
- def _get_data(self):
- to_netdata = dict()
- self.request, self.url = self.options_['stat'], self.options_['stat']
- stat_data = self._get_stat_data()
- self.request, self.url = self.options_['info'], self.options_['info']
- info_data = self._get_info_data(regex=self.options_['regex'])
-
- to_netdata.update(stat_data)
- to_netdata.update(info_data)
- return to_netdata or None
-
- def _get_stat_data(self):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
-
- if not raw_data:
- return dict()
-
- raw_data = raw_data.splitlines()
- self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
- for _ in range(1, len(raw_data))])
- if not self.data:
- return dict()
-
- stat_data = dict()
-
- for frontend in self.data['frontend']:
- for metric in METRICS:
- idx = frontend['# pxname'].replace('.', '_')
- stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
-
- for backend in self.data['backend']:
- name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
- stat_data['hsup_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'UP')])
- stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'DOWN')])
- stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
- for metric in BACKEND_METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- hrsp_total = 0
- for metric in METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- if metric.startswith('hrsp_'):
- hrsp_total += int(backend.get(metric) or 0)
- stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
- return stat_data
-
- def _get_info_data(self, regex):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
- if not raw_data:
- return dict()
-
- match = regex.search(raw_data)
- return match.groupdict() if match else dict()
-
- @staticmethod
- def _check_raw_data(data):
- """
- Check if all data has been gathered from socket
- :param data: str
- :return: boolean
- """
- return not bool(data)
-
- def create_charts(self):
- for front in self.data['frontend']:
- name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for back in self.data['backend']:
- name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for metric in BACKEND_METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, BACKEND_METRICS[metric]['algorithm'], 1,
- BACKEND_METRICS[metric]['divisor']])
- self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
- self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
- self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
-
-
-def parse_data_(data):
- def is_backend(backend):
- return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
-
- def is_frontend(frontend):
- return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
-
- def is_server(server):
- return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
-
- if not data:
- return None
-
- result = defaultdict(list)
- for elem in data:
- if is_backend(elem):
- result['backend'].append(elem)
- continue
- elif is_frontend(elem):
- result['frontend'].append(elem)
- continue
- elif is_server(elem):
- result['servers'].append(elem)
-
- return result or None
-
-
-def server_status(server, backend_name, status='DOWN'):
- return server.get('# pxname') == backend_name and server.get('status') == status
-
-
-def url_remove_params(url):
- parsed = urlparse(url or str())
- return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/python.d/hddtemp.chart.py b/python.d/hddtemp.chart.py
deleted file mode 100644
index 577cab09f..000000000
--- a/python.d/hddtemp.chart.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: hddtemp netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-
-import os
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-#update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 7634
-# }}
-
-ORDER = ['temperatures']
-
-CHARTS = {
- 'temperatures': {
- 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]}}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self._keep_alive = False
- self.request = ""
- self.host = "127.0.0.1"
- self.port = 7634
- self.disks = list()
-
- def get_disks(self):
- try:
- disks = self.configuration['devices']
- self.info("Using configured disks {0}".format(disks))
- except (KeyError, TypeError):
- self.info("Autodetecting disks")
- return ["/dev/" + f for f in os.listdir("/dev") if len(f) == 3 and f.startswith("sd")]
-
- ret = list()
- for disk in disks:
- if not disk.startswith('/dev/'):
- disk = "/dev/" + disk
- ret.append(disk)
- if not ret:
- self.error("Provided disks cannot be found in /dev directory.")
- return ret
-
- def _check_raw_data(self, data):
- if not data.endswith('|'):
- return False
-
- if all(disk in data for disk in self.disks):
- return True
- return False
-
- def get_data(self):
- """
- Get data from TCP/IP socket
- :return: dict
- """
- try:
- raw = self._get_raw_data().split("|")[:-1]
- except AttributeError:
- self.error("no data received")
- return None
- data = dict()
- for i in range(len(raw) // 5):
- if raw[i*5+1] not in self.disks:
- continue
- try:
- val = int(raw[i*5+3])
- except ValueError:
- val = 0
- data[raw[i*5+1].replace("/dev/", "")] = val
-
- if not data:
- self.error("received data doesn't have needed records")
- return None
- return data
-
- def check(self):
- """
- Parse configuration, check if hddtemp is available, and dynamically create chart lines data
- :return: boolean
- """
- self._parse_config()
- self.disks = self.get_disks()
-
- data = self.get_data()
- if data is None:
- return False
-
- for name in data:
- self.definitions['temperatures']['lines'].append([name])
- return True
diff --git a/python.d/httpcheck.chart.py b/python.d/httpcheck.chart.py
deleted file mode 100644
index b0177ff90..000000000
--- a/python.d/httpcheck.chart.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: http check netdata python.d module
-# Original Author: ccremer (github.com/ccremer)
-
-import urllib3
-import re
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 3
-priority = 60000
-retries = 60
-
-# Response
-HTTP_RESPONSE_TIME = 'time'
-HTTP_RESPONSE_LENGTH = 'length'
-
-# Status dimensions
-HTTP_SUCCESS = 'success'
-HTTP_BAD_CONTENT = 'bad_content'
-HTTP_BAD_STATUS = 'bad_status'
-HTTP_TIMEOUT = 'timeout'
-HTTP_NO_CONNECTION = 'no_connection'
-
-ORDER = ['response_time', 'response_length', 'status']
-
-CHARTS = {
- 'response_time': {
- 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
- 'lines': [
- [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
- ]},
- 'response_length': {
- 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
- 'lines': [
- [HTTP_RESPONSE_LENGTH, 'length', 'absolute']
- ]},
- 'status': {
- 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
- 'lines': [
- [HTTP_SUCCESS, 'success', 'absolute'],
- [HTTP_BAD_CONTENT, 'bad content', 'absolute'],
- [HTTP_BAD_STATUS, 'bad status', 'absolute'],
- [HTTP_TIMEOUT, 'timeout', 'absolute'],
- [HTTP_NO_CONNECTION, 'no connection', 'absolute']
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- pattern = self.configuration.get('regex')
- self.regex = re.compile(pattern) if pattern else None
- self.status_codes_accepted = self.configuration.get('status_accepted', [200])
- self.follow_redirect = self.configuration.get('redirect', True)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = dict()
- data[HTTP_SUCCESS] = 0
- data[HTTP_BAD_CONTENT] = 0
- data[HTTP_BAD_STATUS] = 0
- data[HTTP_TIMEOUT] = 0
- data[HTTP_NO_CONNECTION] = 0
- url = self.url
- try:
- start = time()
- status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False,
- redirect=self.follow_redirect)
- diff = time() - start
- data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0)
- self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format(
- url=url, code=status, diff=diff
- ))
- self.process_response(content, data, status)
-
- except urllib3.exceptions.NewConnectionError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
- data[HTTP_NO_CONNECTION] = 1
-
- except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
- self.debug("Connection timed out: {url}. Error: {error}".format(url=url, error=error))
- data[HTTP_TIMEOUT] = 1
-
- except urllib3.exceptions.HTTPError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
- data[HTTP_NO_CONNECTION] = 1
-
- except (TypeError, AttributeError) as error:
- self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
- return None
-
- return data
-
- def process_response(self, content, data, status):
- data[HTTP_RESPONSE_LENGTH] = len(content)
- self.debug('Content: \n\n{content}\n'.format(content=content))
- if status in self.status_codes_accepted:
- if self.regex and self.regex.search(content) is None:
- self.debug("No match for regex '{regex}' found".format(regex=self.regex.pattern))
- data[HTTP_BAD_CONTENT] = 1
- else:
- data[HTTP_SUCCESS] = 1
- else:
- data[HTTP_BAD_STATUS] = 1
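
In _get_data() above the elapsed request time is stored as max(round(diff * 10000), 0), and the chart line [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000] rescales that value before display. A quick worked check of the scaling, with an illustrative 123 ms request:

# Worked check of the response-time scaling used above (values are illustrative).
diff = 0.123                            # measured request time in seconds
stored = max(round(diff * 10000), 0)    # value handed to netdata: 1230
multiplier, divisor = 100, 1000         # from the chart line definition
shown = stored * multiplier / divisor   # 123.0 -> rendered as 123 ms
print(stored, shown)
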
diff --git a/python.d/icecast.chart.py b/python.d/icecast.chart.py
deleted file mode 100644
index 792b99f3f..000000000
--- a/python.d/icecast.chart.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: icecast netdata python.d module
-# Author: Ilya Mashchenko (l2isbad)
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['listeners']
-
-CHARTS = {
- 'listeners': {
- 'options': [None, 'Number Of Listeners', 'listeners',
- 'listeners', 'icecast.listeners', 'line'],
- 'lines': [
- ]}
-}
-
-
-class Source:
- def __init__(self, idx, data):
- self.name = 'source_{0}'.format(idx)
- self.is_active = data.get('stream_start') and data.get('server_name')
- self.listeners = data['listeners']
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url')
- self._manager = self._build_manager()
-
- def check(self):
- """
- Add active sources to the "listeners" chart
- :return: bool
- """
- sources = self.get_sources()
- if not sources:
- return None
-
- active_sources = 0
- for idx, raw_source in enumerate(sources):
- if Source(idx, raw_source).is_active:
- active_sources += 1
- dim_id = 'source_{0}'.format(idx)
- dim = 'source {0}'.format(idx)
- self.definitions['listeners']['lines'].append([dim_id, dim])
-
- return bool(active_sources)
-
- def _get_data(self):
- """
- Get number of listeners for every source
- :return: dict
- """
- sources = self.get_sources()
- if not sources:
- return None
-
- data = dict()
-
- for idx, raw_source in enumerate(sources):
- source = Source(idx, raw_source)
- data[source.name] = source.listeners
-
- return data
-
- def get_sources(self):
- """
- Format data received from http request and return list of sources
- :return: list
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- data = json.loads(raw_data)
- except ValueError as error:
- self.error("JSON decode error:", error)
- return None
-
- return data['icestats'].get('source')
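
get_sources() above returns data['icestats'].get('source') from the Icecast status endpoint, and Source.is_active treats a source as active only when both stream_start and server_name are present. A small sketch of that classification on a hypothetical payload (the JSON shape is an assumption based on the keys used above):

import json

# Hypothetical status payload; only the keys used above are included.
payload = json.loads('''{"icestats": {"source": [
  {"server_name": "radio", "stream_start": "Tue, 01 May 2018 10:00:00 +0000", "listeners": 7},
  {"listeners": 0}
]}}''')

for idx, raw_source in enumerate(payload['icestats'].get('source')):
    is_active = bool(raw_source.get('stream_start') and raw_source.get('server_name'))
    print('source_{0}'.format(idx), is_active, raw_source['listeners'])
# source_0 True 7
# source_1 False 0
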
diff --git a/python.d/ipfs.chart.py b/python.d/ipfs.chart.py
deleted file mode 100644
index 43500dfb5..000000000
--- a/python.d/ipfs.chart.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: IPFS netdata python.d module
-# Authors: Pawel Krupa (paulfantom), davidak
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost:5001'
-# }}
-
-# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
- 'lines': [
- ["in", None, "absolute", 8, 1000],
- ["out", None, "absolute", -8, 1000]
- ]},
- 'peers': {
- 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
- 'lines': [
- ["peers", None, 'absolute']
- ]},
- 'repo_size': {
- 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
- 'lines': [
- ["avail", None, "absolute", 1, 1e9],
- ["size", None, "absolute", 1, 1e9],
- ]},
- 'repo_objects': {
- 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
- 'lines': [
- ["objects", None, "absolute", 1, 1],
- ["pinned", None, "absolute", 1, 1],
- ["recursive_pins", None, "absolute", 1, 1]
- ]},
-}
-
-SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12,
- 'p': 15, 'e': 18, 'z': 21, 'y': 24}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:5001')
- self.order = ORDER
- self.definitions = CHARTS
- self.__storage_max = None
-
- def _get_json(self, sub_url):
- """
- :return: json decoding of the specified url
- """
- self.url = self.baseurl + sub_url
- try:
- return json.loads(self._get_raw_data())
- except (TypeError, ValueError):
- return dict()
-
- @staticmethod
- def _recursive_pins(keys):
-        return len([k for k in keys if keys[k]["Type"] == "recursive"])
-
- @staticmethod
- def _dehumanize(store_max):
- # convert from '10Gb' to 10000000000
- if not isinstance(store_max, int):
- store_max = store_max.lower()
- if store_max.endswith('b'):
- val, units = store_max[:-2], store_max[-2]
- if units in SI_zeroes:
- val += '0'*SI_zeroes[units]
- store_max = val
- try:
- store_max = int(store_max)
- except (TypeError, ValueError):
- store_max = None
- return store_max
-
- def _storagemax(self, store_cfg):
- if self.__storage_max is None:
- self.__storage_max = self._dehumanize(store_cfg['StorageMax'])
- return self.__storage_max
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- # suburl : List of (result-key, original-key, transform-func)
- cfg = {
- '/api/v0/stats/bw':
- [('in', 'RateIn', int), ('out', 'RateOut', int)],
- '/api/v0/swarm/peers':
- [('peers', 'Strings', len)],
- '/api/v0/stats/repo':
- [('size', 'RepoSize', int), ('objects', 'NumObjects', int)],
- '/api/v0/pin/ls':
- [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)],
- '/api/v0/config/show': [('avail', 'Datastore', self._storagemax)]
- }
- r = dict()
- for suburl in cfg:
- in_json = self._get_json(suburl)
- for new_key, orig_key, xmute in cfg[suburl]:
- try:
- r[new_key] = xmute(in_json[orig_key])
- except Exception:
- continue
- return r or None
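
_dehumanize() above turns a human-readable StorageMax such as '10Gb' into a byte count by padding the numeric part with zeroes from SI_zeroes. A standalone copy of that helper with the '10Gb' example from the comment:

SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12, 'p': 15, 'e': 18, 'z': 21, 'y': 24}

def dehumanize(store_max):
    # standalone copy of the helper above: '10Gb' -> 10000000000
    if not isinstance(store_max, int):
        store_max = store_max.lower()
        if store_max.endswith('b'):
            val, units = store_max[:-2], store_max[-2]
            if units in SI_zeroes:
                val += '0' * SI_zeroes[units]
            store_max = val
        try:
            store_max = int(store_max)
        except (TypeError, ValueError):
            store_max = None
    return store_max

print(dehumanize('10Gb'))  # 10000000000
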
diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py
deleted file mode 100644
index eb6338452..000000000
--- a/python.d/isc_dhcpd.chart.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: isc dhcpd lease netdata python.d module
-# Author: l2isbad
-
-import os
-import re
-import time
-
-
-try:
- import ipaddress
- HAVE_IP_ADDRESS = True
-except ImportError:
- HAVE_IP_ADDRESS = False
-
-from collections import defaultdict
-from copy import deepcopy
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-
-ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
-
-CHARTS = {
- 'pools_utilization': {
- 'options': [None, 'Pools Utilization', '%', 'utilization',
- 'isc_dhcpd.utilization', 'line'],
- 'lines': []},
- 'pools_active_leases': {
- 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases',
- 'isc_dhcpd.active_leases', 'line'],
- 'lines': []},
- 'leases_total': {
- 'options': [None, 'All Active Leases', 'leases', 'active leases',
- 'isc_dhcpd.leases_total', 'line'],
- 'lines': [['leases_total', 'leases', 'absolute']],
- 'variables': [
- ['leases_size']
- ]
- }
-}
-
-
-class DhcpdLeasesFile:
- def __init__(self, path):
- self.path = path
- self.mod_time = 0
- self.size = 0
-
- def is_valid(self):
- return os.path.isfile(self.path) and os.access(self.path, os.R_OK)
-
- def is_changed(self):
- mod_time = os.path.getmtime(self.path)
- if mod_time != self.mod_time:
- self.mod_time = mod_time
- self.size = int(os.path.getsize(self.path) / 1024)
- return True
- return False
-
- def get_data(self):
- try:
- with open(self.path) as leases:
- result = defaultdict(dict)
- for row in leases:
- row = row.strip()
- if row.startswith('lease'):
- address = row[6:-2]
- elif row.startswith('iaaddr'):
- address = row[7:-2]
- elif row.startswith('ends'):
- result[address]['ends'] = row[5:-1]
- elif row.startswith('binding state'):
- result[address]['state'] = row[14:-1]
- return dict((k, v) for k, v in result.items() if len(v) == 2)
- except (OSError, IOError):
- return None
-
-
-class Pool:
- def __init__(self, name, network):
- self.id = re.sub(r'[:/.-]+', '_', name)
- self.name = name
- self.network = ipaddress.ip_network(address=u'%s' % network)
-
- def num_hosts(self):
- return self.network.num_addresses - 2
-
- def __contains__(self, item):
- return item.address in self.network
-
-
-class Lease:
- def __init__(self, address, ends, state):
- self.address = ipaddress.ip_address(address=u'%s' % address)
- self.ends = ends
- self.state = state
-
- def is_active(self, current_time):
- # lease_end_time might be epoch
- if self.ends.startswith('epoch'):
- epoch = int(self.ends.split()[1].replace(';', ''))
- return epoch - current_time > 0
-        # a max. int lease-time causes the lease to expire in year 2038;
-        # dhcpd puts 'never' in the ends section of an active lease
- elif self.ends == 'never':
- return True
- return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
-
- def is_valid(self):
- return self.state == 'active'
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
-
- lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
- self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
- self.pools = list()
- self.data = dict()
-
- # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
-    # TODO: update the algorithm to correctly parse the 'local' db-time-format
-
- def check(self):
- if not HAVE_IP_ADDRESS:
- self.error("'python-ipaddress' module is needed")
- return False
-
- if not self.dhcpd_leases.is_valid():
-            self.error("Make sure '{path}' exists and is readable by netdata".format(path=self.dhcpd_leases.path))
- return False
-
- pools = self.configuration.get('pools')
- if not pools:
- self.error('Pools are not defined')
- return False
-
- for pool in pools:
- try:
- new_pool = Pool(name=pool, network=pools[pool])
- except ValueError as error:
-                self.error("Pool '{pool}' was skipped, error: {error}".format(pool=pools[pool], error=error))
- else:
- self.pools.append(new_pool)
-
- self.create_charts()
- return bool(self.pools)
-
- def get_data(self):
- """
- :return: dict
- """
- if not self.dhcpd_leases.is_changed():
- return self.data
-
- raw_leases = self.dhcpd_leases.get_data()
- if not raw_leases:
- self.data = dict()
- return None
-
- active_leases = list()
- current_time = time.mktime(time.gmtime())
-
- for address in raw_leases:
- try:
- new_lease = Lease(address, **raw_leases[address])
- except ValueError:
- continue
- else:
- if new_lease.is_active(current_time) and new_lease.is_valid():
- active_leases.append(new_lease)
-
- for pool in self.pools:
- count = len([ip for ip in active_leases if ip in pool])
- self.data[pool.id + '_active_leases'] = count
- self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
-
- self.data['leases_size'] = self.dhcpd_leases.size
- self.data['leases_total'] = len(active_leases)
-
- return self.data
-
- def create_charts(self):
- for pool in self.pools:
- self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
- 'absolute', 1, 100])
- self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
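
In get_data() above, pool utilization is stored as the ratio of active leases to usable hosts scaled by 10000, and the chart line added in create_charts() divides by 100, so it renders as a percentage with two decimal places. A worked check on an illustrative /24 pool:

import ipaddress

# Illustrative pool and active-lease count.
network = ipaddress.ip_network(u'192.168.1.0/24')
num_hosts = network.num_addresses - 2                # 254 usable addresses
active_leases = 127

stored = float(active_leases) / num_hosts * 10000    # value handed to netdata: 5000.0
shown = stored / 100                                 # chart divisor 100 -> 50.0 %
print(round(stored), shown)
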
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
deleted file mode 100644
index 35ba9058f..000000000
--- a/python.d/mdstat.chart.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: mdstat netdata python.d module
-# Author: l2isbad
-
-import re
-
-from collections import defaultdict
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-update_every = 1
-
-ORDER = ['mdstat_health']
-CHARTS = {
- 'mdstat_health': {
- 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
- 'lines': list()
- }
-}
-
-OPERATIONS = ('check', 'resync', 'reshape', 'recovery', 'finish', 'speed')
-
-RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
- r'(?P<total_disks>[0-9]+)/'
- r'(?P<inuse_disks>[0-9]+)\]')
-
-RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
- r'(?P<operation>[a-z]+) =[ ]{1,2}'
- r'(?P<operation_status>[0-9.]+).+finish='
- r'(?P<finish>([0-9.]+))min speed='
- r'(?P<speed>[0-9]+)')
-
-
-def md_charts(md):
- order = ['{0}_disks'.format(md.name),
- '{0}_operation'.format(md.name),
- '{0}_finish'.format(md.name),
- '{0}_speed'.format(md.name)
- ]
-
- charts = dict()
- charts[order[0]] = {
- 'options': [None, 'Disks Stats', 'disks', md.name, 'md.disks', 'stacked'],
- 'lines': [
- ['{0}_total_disks'.format(md.name), 'total', 'absolute'],
- ['{0}_inuse_disks'.format(md.name), 'inuse', 'absolute']
- ]
- }
-
- charts['_'.join([md.name, 'operation'])] = {
- 'options': [None, 'Current Status', 'percent', md.name, 'md.status', 'line'],
- 'lines': [
- ['{0}_resync'.format(md.name), 'resync', 'absolute', 1, 100],
- ['{0}_recovery'.format(md.name), 'recovery', 'absolute', 1, 100],
- ['{0}_reshape'.format(md.name), 'reshape', 'absolute', 1, 100],
- ['{0}_check'.format(md.name), 'check', 'absolute', 1, 100]
- ]
- }
-
- charts['_'.join([md.name, 'finish'])] = {
- 'options': [None, 'Approximate Time Until Finish', 'seconds', md.name, 'md.rate', 'line'],
- 'lines': [
- ['{0}_finish'.format(md.name), 'finish in', 'absolute', 1, 1000]
- ]
- }
-
- charts['_'.join([md.name, 'speed'])] = {
- 'options': [None, 'Operation Speed', 'KB/s', md.name, 'md.rate', 'line'],
- 'lines': [
- ['{0}_speed'.format(md.name), 'speed', 'absolute', 1, 1000]
- ]
- }
-
- return order, charts
-
-
-class MD:
- def __init__(self, name, stats):
- self.name = name
- self.stats = stats
-
- def update_stats(self, stats):
- self.stats = stats
-
- def data(self):
- stats = dict(('_'.join([self.name, k]), v) for k, v in self.stats.items())
- stats['{0}_health'.format(self.name)] = int(self.stats['total_disks']) - int(self.stats['inuse_disks'])
- return stats
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.mds = dict()
-
- def check(self):
- arrays = find_arrays(self._get_raw_data())
- if not arrays:
-            self.error('Failed to read data from /proc/mdstat or there are no active arrays')
-            return False
- return True
-
- @staticmethod
- def _get_raw_data():
- """
- Read data from /proc/mdstat
- :return: str
- """
- try:
- with open('/proc/mdstat', 'rt') as proc_mdstat:
- return proc_mdstat.readlines() or None
- except (OSError, IOError):
- return None
-
- def get_data(self):
- """
- Parse data from _get_raw_data()
- :return: dict
- """
- arrays = find_arrays(self._get_raw_data())
- if not arrays:
- return None
-
- data = dict()
- for array, values in arrays.items():
-
- if array not in self.mds:
- md = MD(array, values)
- self.mds[md.name] = md
- self.create_new_array_charts(md)
- else:
- md = self.mds[array]
- md.update_stats(values)
-
- data.update(md.data())
-
- return data
-
- def create_new_array_charts(self, md):
- order, charts = md_charts(md)
-
- self.charts['mdstat_health'].add_dimension(['{0}_health'.format(md.name), md.name])
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
-
-def find_arrays(raw_data):
- if raw_data is None:
- return None
- data = defaultdict(str)
- counter = 1
-
- for row in (elem.strip() for elem in raw_data):
- if not row:
- counter += 1
- continue
- data[counter] = ' '.join([data[counter], row])
-
- arrays = dict()
- for value in data.values():
- match = RE_DISKS.search(value)
- if not match:
- continue
-
- match = match.groupdict()
- array = match.pop('array')
- arrays[array] = match
- for operation in OPERATIONS:
- arrays[array][operation] = 0
-
- match = RE_STATUS.search(value)
- if match:
- match = match.groupdict()
- if match['operation'] in OPERATIONS:
- arrays[array]['operation'] = match['operation']
- arrays[array][match['operation']] = float(match['operation_status']) * 100
- arrays[array]['finish'] = float(match['finish']) * 1000 * 60
- arrays[array]['speed'] = float(match['speed']) * 1000
-
- return arrays or None
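
find_arrays() above joins every /proc/mdstat stanza into a single line and then applies RE_DISKS and RE_STATUS to it. A sketch of what those regexes capture for a hypothetical resyncing raid1 array (the stanza below is illustrative and trimmed to the fields the patterns use):

import re

# Hypothetical /proc/mdstat stanza, already stripped and joined the way find_arrays() does.
value = (' md0 : active raid1 sdb1[1] sda1[0] 1046528 blocks super 1.2 [2/2] [UU]'
         ' [==>..................] resync = 12.6% (131072/1046528) finish=0.8min speed=180224K/sec')

RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\[(?P<total_disks>[0-9]+)/(?P<inuse_disks>[0-9]+)\]')
RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ (?P<operation>[a-z]+) =[ ]{1,2}'
                       r'(?P<operation_status>[0-9.]+).+finish=(?P<finish>([0-9.]+))min speed=(?P<speed>[0-9]+)')

print(RE_DISKS.search(value).groupdict())   # {'array': 'md0', 'total_disks': '2', 'inuse_disks': '2'}
print(RE_STATUS.search(value).groupdict())  # array='md0', operation='resync', operation_status='12.6', finish='0.8', speed='180224'
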
diff --git a/python.d/memcached.chart.py b/python.d/memcached.chart.py
deleted file mode 100644
index 4f7adfa23..000000000
--- a/python.d/memcached.chart.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: memcached netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-#update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 11211,
-# 'unix_socket': None
-# }}
-
-ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
- 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
-
-CHARTS = {
- 'cache': {
- 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
- 'lines': [
- ['avail', 'available', 'absolute', 1, 1048576],
- ['used', 'used', 'absolute', 1, 1048576]
- ]},
- 'net': {
- 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
- 'lines': [
- ['bytes_read', 'in', 'incremental', 8, 1024],
- ['bytes_written', 'out', 'incremental', -8, 1024]
- ]},
- 'connections': {
- 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
- 'lines': [
- ['curr_connections', 'current', 'incremental'],
- ['rejected_connections', 'rejected', 'incremental'],
- ['total_connections', 'total', 'incremental']
- ]},
- 'items': {
- 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
- 'lines': [
- ['curr_items', 'current', 'absolute'],
- ['total_items', 'total', 'absolute']
- ]},
- 'evicted_reclaimed': {
-        'options': [None, 'Evicted and Reclaimed Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
- 'lines': [
- ['reclaimed', 'reclaimed', 'absolute'],
- ['evictions', 'evicted', 'absolute']
- ]},
- 'get': {
- 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
- 'lines': [
- ['get_hits', 'hits', 'percent-of-absolute-row'],
- ['get_misses', 'misses', 'percent-of-absolute-row']
- ]},
- 'get_rate': {
- 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
- 'lines': [
- ['cmd_get', 'rate', 'incremental']
- ]},
- 'set_rate': {
- 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
- 'lines': [
- ['cmd_set', 'rate', 'incremental']
- ]},
- 'delete': {
- 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
- 'lines': [
- ['delete_hits', 'hits', 'percent-of-absolute-row'],
- ['delete_misses', 'misses', 'percent-of-absolute-row'],
- ]},
- 'cas': {
- 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
- 'lines': [
- ['cas_hits', 'hits', 'percent-of-absolute-row'],
- ['cas_misses', 'misses', 'percent-of-absolute-row'],
- ['cas_badval', 'bad value', 'percent-of-absolute-row']
- ]},
- 'increment': {
- 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
- 'lines': [
- ['incr_hits', 'hits', 'percent-of-absolute-row'],
- ['incr_misses', 'misses', 'percent-of-absolute-row']
- ]},
- 'decrement': {
- 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
- 'lines': [
- ['decr_hits', 'hits', 'percent-of-absolute-row'],
- ['decr_misses', 'misses', 'percent-of-absolute-row']
- ]},
- 'touch': {
- 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
- 'lines': [
- ['touch_hits', 'hits', 'percent-of-absolute-row'],
- ['touch_misses', 'misses', 'percent-of-absolute-row']
- ]},
- 'touch_rate': {
- 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
- 'lines': [
- ['cmd_touch', 'rate', 'incremental']
- ]}
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "stats\r\n"
- self.host = "localhost"
- self.port = 11211
- self._keep_alive = True
- self.unix_socket = None
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- response = self._get_raw_data()
- if response is None:
- # error has already been logged
- return None
-
- if response.startswith('ERROR'):
- self.error("received ERROR")
- return None
-
- try:
- parsed = response.split("\n")
- except AttributeError:
- self.error("response is invalid/empty")
- return None
-
- # split the response
- data = {}
- for line in parsed:
- if line.startswith('STAT'):
- try:
- t = line[5:].split(' ')
- data[t[0]] = t[1]
- except (IndexError, ValueError):
- self.debug("invalid line received: " + str(line))
-
- if not data:
- self.error("received data doesn't have any records")
- return None
-
- # custom calculations
- try:
- data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
- data['used'] = int(data['bytes'])
- except (KeyError, ValueError, TypeError):
- pass
-
- return data
-
- def _check_raw_data(self, data):
- if data.endswith('END\r\n'):
- self.debug("received full response from memcached")
- return True
-
-        self.debug("waiting for more data from memcached")
- return False
-
- def check(self):
- """
- Parse configuration, check if memcached is available
- :return: boolean
- """
- self._parse_config()
- data = self._get_data()
- if data is None:
- return False
- return True
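
_get_data() above splits each "STAT <name> <value>" line of the stats response and derives 'avail' and 'used' from limit_maxbytes and bytes; the cache chart then divides by 1048576 to display megabytes. A small sketch on an illustrative response fragment:

# Sketch of the STAT-line parsing and the derived cache values; the response fragment is illustrative.
response = "STAT bytes 52428800\r\nSTAT limit_maxbytes 67108864\r\nSTAT curr_items 1000\r\nEND\r\n"

data = {}
for line in response.split("\n"):
    if line.startswith('STAT'):
        t = line[5:].split(' ')
        data[t[0]] = t[1]

data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
data['used'] = int(data['bytes'])
print(data['used'] / 1048576.0, data['avail'] / 1048576.0)  # 50.0 MB used, 14.0 MB available
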
diff --git a/python.d/mongodb.chart.py b/python.d/mongodb.chart.py
deleted file mode 100644
index 909a419da..000000000
--- a/python.d/mongodb.chart.py
+++ /dev/null
@@ -1,673 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: mongodb netdata python.d module
-# Author: l2isbad
-
-from copy import deepcopy
-from datetime import datetime
-from sys import exc_info
-
-try:
- from pymongo import MongoClient, ASCENDING, DESCENDING
- from pymongo.errors import PyMongoError
- PYMONGO = True
-except ImportError:
- PYMONGO = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-REPL_SET_STATES = [
- ('1', 'primary'),
- ('8', 'down'),
- ('2', 'secondary'),
- ('3', 'recovering'),
- ('5', 'startup2'),
- ('4', 'fatal'),
- ('7', 'arbiter'),
- ('6', 'unknown'),
- ('9', 'rollback'),
- ('10', 'removed'),
- ('0', 'startup')]
-
-
-def multiply_by_100(value):
- return value * 100
-
-
-DEFAULT_METRICS = [
- ('opcounters.delete', None, None),
- ('opcounters.update', None, None),
- ('opcounters.insert', None, None),
- ('opcounters.query', None, None),
- ('opcounters.getmore', None, None),
- ('globalLock.activeClients.readers', 'activeClients_readers', None),
- ('globalLock.activeClients.writers', 'activeClients_writers', None),
- ('connections.available', 'connections_available', None),
- ('connections.current', 'connections_current', None),
- ('mem.mapped', None, None),
- ('mem.resident', None, None),
- ('mem.virtual', None, None),
- ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
- ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
- ('asserts.msg', None, None),
- ('asserts.regular', None, None),
- ('asserts.user', None, None),
- ('asserts.warning', None, None),
- ('extra_info.page_faults', None, None),
- ('metrics.record.moves', None, None),
- ('backgroundFlushing.average_ms', None, multiply_by_100),
- ('backgroundFlushing.last_ms', None, multiply_by_100),
- ('backgroundFlushing.flushes', None, multiply_by_100),
- ('metrics.cursor.timedOut', None, None),
- ('metrics.cursor.open.total', 'cursor_total', None),
- ('metrics.cursor.open.noTimeout', None, None),
- ('cursors.timedOut', None, None),
- ('cursors.totalOpen', 'cursor_total', None)
-]
-
-DUR = [
- ('dur.commits', None, None),
- ('dur.journaledMB', None, multiply_by_100)
-]
-
-WIREDTIGER = [
- ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
- ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
- ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
- ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
- ('wiredTiger.cache.bytes currently in the cache', None, None),
- ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
- ('wiredTiger.cache.maximum bytes configured', None, None),
- ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
- ('wiredTiger.cache.modified pages evicted', 'modified', None)
-]
-
-TCMALLOC = [
- ('tcmalloc.generic.current_allocated_bytes', None, None),
- ('tcmalloc.generic.heap_size', None, None),
- ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
- ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
- ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
- ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
- ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
- ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
-]
-
-COMMANDS = [
- ('metrics.commands.count.total', 'count_total', None),
- ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
- ('metrics.commands.delete.total', 'delete_total', None),
- ('metrics.commands.eval.total', 'eval_total', None),
- ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
- ('metrics.commands.insert.total', 'insert_total', None),
- ('metrics.commands.delete.total', 'delete_total', None),
- ('metrics.commands.count.failed', 'count_failed', None),
- ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
- ('metrics.commands.delete.failed', 'delete_failed', None),
- ('metrics.commands.eval.failed', 'eval_failed', None),
- ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
- ('metrics.commands.insert.failed', 'insert_failed', None),
- ('metrics.commands.delete.failed', 'delete_failed', None)
-]
-
-LOCKS = [
- ('locks.Collection.acquireCount.R', 'Collection_R', None),
- ('locks.Collection.acquireCount.r', 'Collection_r', None),
- ('locks.Collection.acquireCount.W', 'Collection_W', None),
- ('locks.Collection.acquireCount.w', 'Collection_w', None),
- ('locks.Database.acquireCount.R', 'Database_R', None),
- ('locks.Database.acquireCount.r', 'Database_r', None),
- ('locks.Database.acquireCount.W', 'Database_W', None),
- ('locks.Database.acquireCount.w', 'Database_w', None),
- ('locks.Global.acquireCount.R', 'Global_R', None),
- ('locks.Global.acquireCount.r', 'Global_r', None),
- ('locks.Global.acquireCount.W', 'Global_W', None),
- ('locks.Global.acquireCount.w', 'Global_w', None),
- ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
- ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
- ('locks.oplog.acquireCount.r', 'oplog_r', None),
- ('locks.oplog.acquireCount.w', 'oplog_w', None)
-]
-
-DBSTATS = [
- 'dataSize',
- 'indexSize',
- 'storageSize',
- 'objects'
-]
-
-# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['read_operations', 'write_operations', 'active_clients', 'journaling_transactions',
- 'journaling_volume', 'background_flush_average', 'background_flush_last', 'background_flush_rate',
- 'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory', 'page_faults',
- 'queued_requests', 'record_moves', 'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts',
- 'locks_collection', 'locks_database', 'locks_global', 'locks_metadata', 'locks_oplog',
- 'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics', 'command_total_rate', 'command_failed_rate']
-
-CHARTS = {
- 'read_operations': {
- 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
- 'mongodb.read_operations', 'line'],
- 'lines': [
- ['query', None, 'incremental'],
- ['getmore', None, 'incremental']
- ]},
- 'write_operations': {
- 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
- 'mongodb.write_operations', 'line'],
- 'lines': [
- ['insert', None, 'incremental'],
- ['update', None, 'incremental'],
- ['delete', None, 'incremental']
- ]},
- 'active_clients': {
- 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
- 'throughput metrics', 'mongodb.active_clients', 'line'],
- 'lines': [
- ['activeClients_readers', 'readers', 'absolute'],
- ['activeClients_writers', 'writers', 'absolute']
- ]},
- 'journaling_transactions': {
- 'options': [None, 'Transactions that have been written to the journal', 'commits',
- 'database performance', 'mongodb.journaling_transactions', 'line'],
- 'lines': [
- ['commits', None, 'absolute']
- ]},
- 'journaling_volume': {
- 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
- 'mongodb.journaling_volume', 'line'],
- 'lines': [
- ['journaledMB', 'volume', 'absolute', 1, 100]
- ]},
- 'background_flush_average': {
- 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
- 'mongodb.background_flush_average', 'line'],
- 'lines': [
- ['average_ms', 'time', 'absolute', 1, 100]
- ]},
- 'background_flush_last': {
- 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
- 'mongodb.background_flush_last', 'line'],
- 'lines': [
- ['last_ms', 'time', 'absolute', 1, 100]
- ]},
- 'background_flush_rate': {
- 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
- 'lines': [
- ['flushes', 'flushes', 'incremental', 1, 1]
- ]},
- 'wiredtiger_read': {
- 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
- 'mongodb.wiredtiger_read', 'stacked'],
- 'lines': [
- ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
- ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
- ]},
- 'wiredtiger_write': {
- 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
- 'mongodb.wiredtiger_write', 'stacked'],
- 'lines': [
- ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
- ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
- ]},
- 'cursors': {
-        'options': [None, 'Currently open cursors, cursors with timeout disabled and timed out cursors',
- 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
- 'lines': [
-            ['cursor_total', 'open', 'absolute', 1, 1],
- ['noTimeout', None, 'absolute', 1, 1],
- ['timedOut', None, 'incremental', 1, 1]
- ]},
- 'connections': {
- 'options': [None, 'Currently connected clients and unused connections', 'connections',
- 'resource utilization', 'mongodb.connections', 'stacked'],
- 'lines': [
- ['connections_available', 'unused', 'absolute', 1, 1],
- ['connections_current', 'connected', 'absolute', 1, 1]
- ]},
- 'memory': {
- 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
- 'lines': [
- ['virtual', None, 'absolute', 1, 1],
- ['resident', None, 'absolute', 1, 1],
- ['nonmapped', None, 'absolute', 1, 1],
- ['mapped', None, 'absolute', 1, 1]
- ]},
- 'page_faults': {
-        'options': [None, 'Number of times MongoDB had to fetch data from disk', 'requests/s',
- 'resource utilization', 'mongodb.page_faults', 'line'],
- 'lines': [
- ['page_faults', None, 'incremental', 1, 1]
- ]},
- 'queued_requests': {
-        'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
- 'mongodb.queued_requests', 'line'],
- 'lines': [
- ['currentQueue_readers', 'readers', 'absolute', 1, 1],
- ['currentQueue_writers', 'writers', 'absolute', 1, 1]
- ]},
- 'record_moves': {
- 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
- 'resource saturation', 'mongodb.record_moves', 'line'],
- 'lines': [
- ['moves', None, 'incremental', 1, 1]
- ]},
- 'asserts': {
-        'options': [None, 'Number of message, warning, regular and user assertions raised',
-                    'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
- 'lines': [
- ['msg', None, 'incremental', 1, 1],
- ['warning', None, 'incremental', 1, 1],
- ['regular', None, 'incremental', 1, 1],
- ['user', None, 'incremental', 1, 1]
- ]},
- 'wiredtiger_cache': {
-        'options': [None, 'The percentage of the wiredTiger cache in use and the percentage with dirty bytes',
- 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
- 'lines': [
- ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
- ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
- ]},
- 'wiredtiger_pages_evicted': {
- 'options': [None, 'Pages evicted from the cache',
- 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
- 'lines': [
- ['unmodified', None, 'absolute', 1, 1],
- ['modified', None, 'absolute', 1, 1]
- ]},
- 'dbstats_objects': {
- 'options': [None, 'Number of documents in the database among all the collections', 'documents',
- 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
- 'lines': [
- ]},
- 'tcmalloc_generic': {
- 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
- 'lines': [
- ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
- ['heap_size', 'heap_size', 'absolute', 1, 1048576]
- ]},
- 'tcmalloc_metrics': {
- 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
- 'lines': [
- ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
- ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
- ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
- ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
- ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
- ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
- ]},
- 'command_total_rate': {
- 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
- 'lines': [
- ['count_total', 'count', 'incremental', 1, 1],
- ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
- ['delete_total', 'delete', 'incremental', 1, 1],
- ['eval_total', 'eval', 'incremental', 1, 1],
- ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
- ['insert_total', 'insert', 'incremental', 1, 1],
- ['update_total', 'update', 'incremental', 1, 1]
- ]},
- 'command_failed_rate': {
- 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
- 'lines': [
- ['count_failed', 'count', 'incremental', 1, 1],
- ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
- ['delete_failed', 'delete', 'incremental', 1, 1],
- ['eval_failed', 'eval', 'incremental', 1, 1],
- ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
- ['insert_failed', 'insert', 'incremental', 1, 1],
- ['update_failed', 'update', 'incremental', 1, 1]
- ]},
- 'locks_collection': {
- 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
- 'lines': [
- ['Collection_R', 'shared', 'incremental'],
- ['Collection_W', 'exclusive', 'incremental'],
- ['Collection_r', 'intent_shared', 'incremental'],
- ['Collection_w', 'intent_exclusive', 'incremental']
- ]},
- 'locks_database': {
- 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
- 'lines': [
- ['Database_R', 'shared', 'incremental'],
- ['Database_W', 'exclusive', 'incremental'],
- ['Database_r', 'intent_shared', 'incremental'],
- ['Database_w', 'intent_exclusive', 'incremental']
- ]},
- 'locks_global': {
- 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
- 'lines': [
- ['Global_R', 'shared', 'incremental'],
- ['Global_W', 'exclusive', 'incremental'],
- ['Global_r', 'intent_shared', 'incremental'],
- ['Global_w', 'intent_exclusive', 'incremental']
- ]},
- 'locks_metadata': {
- 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
- 'lines': [
- ['Metadata_R', 'shared', 'incremental'],
- ['Metadata_w', 'intent_exclusive', 'incremental']
- ]},
- 'locks_oplog': {
- 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
- 'lines': [
- ['oplog_r', 'intent_shared', 'incremental'],
- ['oplog_w', 'intent_exclusive', 'incremental']
- ]}
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER[:]
- self.definitions = deepcopy(CHARTS)
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 27017)
- self.timeout = self.configuration.get('timeout', 100)
- self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
- self.connection = None
- self.do_replica = None
- self.databases = list()
-
- def check(self):
- if not PYMONGO:
- self.error('Pymongo module is needed to use mongodb.chart.py')
- return False
- self.connection, server_status, error = self._create_connection()
- if error:
- self.error(error)
- return False
-
- self.build_metrics_to_collect_(server_status)
-
- try:
- data = self._get_data()
- except (LookupError, SyntaxError, AttributeError):
- self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
- return False
- if isinstance(data, dict) and data:
- self._data_from_check = data
- self.create_charts_(server_status)
- return True
- self.error('_get_data() returned no data or type is not <dict>')
- return False
-
- def build_metrics_to_collect_(self, server_status):
-
- self.do_replica = 'repl' in server_status
- if 'dur' in server_status:
- self.metrics_to_collect.extend(DUR)
- if 'tcmalloc' in server_status:
- self.metrics_to_collect.extend(TCMALLOC)
- if 'commands' in server_status['metrics']:
- self.metrics_to_collect.extend(COMMANDS)
- if 'wiredTiger' in server_status:
- self.metrics_to_collect.extend(WIREDTIGER)
- if 'Collection' in server_status['locks']:
- self.metrics_to_collect.extend(LOCKS)
-
- def create_charts_(self, server_status):
-
- if 'dur' not in server_status:
- self.order.remove('journaling_transactions')
- self.order.remove('journaling_volume')
-
- if 'backgroundFlushing' not in server_status:
- self.order.remove('background_flush_average')
- self.order.remove('background_flush_last')
- self.order.remove('background_flush_rate')
-
- if 'wiredTiger' not in server_status:
- self.order.remove('wiredtiger_write')
- self.order.remove('wiredtiger_read')
- self.order.remove('wiredtiger_cache')
-
- if 'tcmalloc' not in server_status:
- self.order.remove('tcmalloc_generic')
- self.order.remove('tcmalloc_metrics')
-
- if 'commands' not in server_status['metrics']:
- self.order.remove('command_total_rate')
- self.order.remove('command_failed_rate')
-
- if 'Collection' not in server_status['locks']:
- self.order.remove('locks_collection')
- self.order.remove('locks_database')
- self.order.remove('locks_global')
- self.order.remove('locks_metadata')
-
- if 'oplog' not in server_status['locks']:
- self.order.remove('locks_oplog')
-
- for dbase in self.databases:
- self.order.append('_'.join([dbase, 'dbstats']))
- self.definitions['_'.join([dbase, 'dbstats'])] = {
- 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
- 'storage size metrics', 'mongodb.dbstats', 'line'],
- 'lines': [
- ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
- ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
- ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
- ]}
- self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
-
- if self.do_replica:
- def create_lines(hosts, string):
- lines = list()
- for host in hosts:
- dim_id = '_'.join([host, string])
- lines.append([dim_id, host, 'absolute', 1, 1000])
- return lines
-
- def create_state_lines(states):
- lines = list()
- for state, description in states:
- dim_id = '_'.join([host, 'state', state])
- lines.append([dim_id, description, 'absolute', 1, 1])
- return lines
-
- all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
- this_host = server_status['repl']['me']
- other_hosts = [host for host in all_hosts if host != this_host]
-
- if 'local' in self.databases:
- self.order.append('oplog_window')
- self.definitions['oplog_window'] = {
- 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
- 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
- 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
- # Create "heartbeat delay" chart
- self.order.append('heartbeat_delay')
- self.definitions['heartbeat_delay'] = {
- 'options': [None, 'Time when last heartbeat was received'
- ' from the replica set member (lastHeartbeatRecv)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
- 'lines': create_lines(other_hosts, 'heartbeat_lag')}
- # Create "optimedate delay" chart
- self.order.append('optimedate_delay')
- self.definitions['optimedate_delay'] = {
- 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
- 'lines': create_lines(all_hosts, 'optimedate')}
- # Create "replica set members state" chart
- for host in all_hosts:
- chart_name = '_'.join([host, 'state'])
- self.order.append(chart_name)
- self.definitions[chart_name] = {
- 'options': [None, 'Replica set member (%s) current state' % host, 'state',
- 'replication and oplog', 'mongodb.replication_state', 'line'],
- 'lines': create_state_lines(REPL_SET_STATES)}
-
- def _get_raw_data(self):
- raw_data = dict()
-
- raw_data.update(self.get_server_status() or dict())
- raw_data.update(self.get_db_stats() or dict())
- raw_data.update(self.get_repl_set_get_status() or dict())
- raw_data.update(self.get_get_replication_info() or dict())
-
- return raw_data or None
-
- def get_server_status(self):
- raw_data = dict()
- try:
- raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
- except PyMongoError:
- return None
- else:
- return raw_data
-
- def get_db_stats(self):
- if not self.databases:
- return None
-
- raw_data = dict()
- raw_data['dbStats'] = dict()
- try:
- for dbase in self.databases:
- raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
- return raw_data
- except PyMongoError:
- return None
-
- def get_repl_set_get_status(self):
- if not self.do_replica:
- return None
-
- raw_data = dict()
- try:
- raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
- return raw_data
- except PyMongoError:
- return None
-
- def get_get_replication_info(self):
- if not (self.do_replica and 'local' in self.databases):
- return None
-
- raw_data = dict()
- raw_data['getReplicationInfo'] = dict()
- try:
- raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", ASCENDING).limit(1)[0]
- raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", DESCENDING).limit(1)[0]
- return raw_data
- except PyMongoError:
- return None
-
- def _get_data(self):
- """
- :return: dict
- """
- raw_data = self._get_raw_data()
-
- if not raw_data:
- return None
-
- to_netdata = dict()
- serverStatus = raw_data['serverStatus']
- dbStats = raw_data.get('dbStats')
- replSetGetStatus = raw_data.get('replSetGetStatus')
- getReplicationInfo = raw_data.get('getReplicationInfo')
- utc_now = datetime.utcnow()
-
- # serverStatus
- for metric, new_name, func in self.metrics_to_collect:
- value = serverStatus
- for key in metric.split('.'):
- try:
- value = value[key]
- except KeyError:
- break
-
- if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not func else func(value)
-
- to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
- to_netdata['mapped'])
- if to_netdata.get('maximum bytes configured'):
- maximum = to_netdata['maximum bytes configured']
- to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
- * 100 / maximum * 1000)
- to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
- * 100 / maximum * 1000)
-
- # dbStats
- if dbStats:
- for dbase in dbStats:
- for metric in DBSTATS:
- key = '_'.join([dbase, metric])
- to_netdata[key] = dbStats[dbase][metric]
-
- # replSetGetStatus
- if replSetGetStatus:
- other_hosts = list()
- members = replSetGetStatus['members']
- unix_epoch = datetime(1970, 1, 1, 0, 0)
-
- for member in members:
- if not member.get('self'):
- other_hosts.append(member)
- # Replica set time diff between current time and time when last entry from the oplog was applied
- if member.get('optimeDate', unix_epoch) != unix_epoch:
- member_optimedate = member['name'] + '_optimedate'
- to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
- multiplier=1000))})
- # Replica set members state
- member_state = member['name'] + '_state'
- for elem in REPL_SET_STATES:
- state = elem[0]
- to_netdata.update({'_'.join([member_state, state]): 0})
- to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
- # Heartbeat lag calculation
- for other in other_hosts:
- if other['lastHeartbeatRecv'] != unix_epoch:
- node = other['name'] + '_heartbeat_lag'
- to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],
- multiplier=1000))
-
- if getReplicationInfo:
- first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
- last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
- to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
-
- return to_netdata
-
- def _create_connection(self):
- conn_vars = {'host': self.host, 'port': self.port}
- if hasattr(MongoClient, 'server_selection_timeout'):
- conn_vars.update({'serverselectiontimeoutms': self.timeout})
- try:
- connection = MongoClient(**conn_vars)
- if self.user and self.password:
- connection.admin.authenticate(name=self.user, password=self.password)
- # elif self.user:
- # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
- server_status = connection.admin.command('serverStatus')
- except PyMongoError as error:
- return None, None, str(error)
- else:
- try:
- self.databases = connection.database_names()
- except PyMongoError as error:
- self.info('Can\'t collect databases: %s' % str(error))
- return connection, server_status, None
-
-
-def delta_calculation(delta, multiplier=1):
- if hasattr(delta, 'total_seconds'):
- return delta.total_seconds() * multiplier
- return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
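
delta_calculation() above converts a datetime delta into the requested unit, falling back to manual arithmetic where timedelta.total_seconds() is unavailable (old Python). Heartbeat and optimedate lags are stored in milliseconds and the corresponding chart lines divide by 1000. A quick check with an illustrative 2.5-second lag:

from datetime import timedelta

def delta_calculation(delta, multiplier=1):
    # standalone copy of the helper above
    if hasattr(delta, 'total_seconds'):
        return delta.total_seconds() * multiplier
    return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier

# Illustrative 2.5 s replication lag, stored as milliseconds and shown with a chart divisor of 1000.
lag = timedelta(seconds=2, milliseconds=500)
stored = int(delta_calculation(delta=lag, multiplier=1000))  # 2500
print(stored, stored / 1000.0)                               # 2500 2.5
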
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
deleted file mode 100644
index 4c7058b26..000000000
--- a/python.d/mysql.chart.py
+++ /dev/null
@@ -1,491 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: MySQL netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.MySQLService import MySQLService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 3
-priority = 60000
-retries = 60
-
-# query executed on MySQL server
-QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
-QUERY_SLAVE = 'SHOW SLAVE STATUS;'
-
-GLOBAL_STATS = [
- 'Bytes_received',
- 'Bytes_sent',
- 'Queries',
- 'Questions',
- 'Slow_queries',
- 'Handler_commit',
- 'Handler_delete',
- 'Handler_prepare',
- 'Handler_read_first',
- 'Handler_read_key',
- 'Handler_read_next',
- 'Handler_read_prev',
- 'Handler_read_rnd',
- 'Handler_read_rnd_next',
- 'Handler_rollback',
- 'Handler_savepoint',
- 'Handler_savepoint_rollback',
- 'Handler_update',
- 'Handler_write',
- 'Table_locks_immediate',
- 'Table_locks_waited',
- 'Select_full_join',
- 'Select_full_range_join',
- 'Select_range',
- 'Select_range_check',
- 'Select_scan',
- 'Sort_merge_passes',
- 'Sort_range',
- 'Sort_scan',
- 'Created_tmp_disk_tables',
- 'Created_tmp_files',
- 'Created_tmp_tables',
- 'Connections',
- 'Aborted_connects',
- 'Binlog_cache_disk_use',
- 'Binlog_cache_use',
- 'Threads_connected',
- 'Threads_created',
- 'Threads_cached',
- 'Threads_running',
- 'Thread_cache_misses',
- 'Innodb_data_read',
- 'Innodb_data_written',
- 'Innodb_data_reads',
- 'Innodb_data_writes',
- 'Innodb_data_fsyncs',
- 'Innodb_data_pending_reads',
- 'Innodb_data_pending_writes',
- 'Innodb_data_pending_fsyncs',
- 'Innodb_log_waits',
- 'Innodb_log_write_requests',
- 'Innodb_log_writes',
- 'Innodb_os_log_fsyncs',
- 'Innodb_os_log_pending_fsyncs',
- 'Innodb_os_log_pending_writes',
- 'Innodb_os_log_written',
- 'Innodb_row_lock_current_waits',
- 'Innodb_rows_inserted',
- 'Innodb_rows_read',
- 'Innodb_rows_updated',
- 'Innodb_rows_deleted',
- 'Innodb_buffer_pool_pages_data',
- 'Innodb_buffer_pool_pages_dirty',
- 'Innodb_buffer_pool_pages_free',
- 'Innodb_buffer_pool_pages_flushed',
- 'Innodb_buffer_pool_pages_misc',
- 'Innodb_buffer_pool_pages_total',
- 'Innodb_buffer_pool_bytes_data',
- 'Innodb_buffer_pool_bytes_dirty',
- 'Innodb_buffer_pool_read_ahead',
- 'Innodb_buffer_pool_read_ahead_evicted',
- 'Innodb_buffer_pool_read_ahead_rnd',
- 'Innodb_buffer_pool_read_requests',
- 'Innodb_buffer_pool_write_requests',
- 'Innodb_buffer_pool_reads',
- 'Innodb_buffer_pool_wait_free',
- 'Qcache_hits',
- 'Qcache_lowmem_prunes',
- 'Qcache_inserts',
- 'Qcache_not_cached',
- 'Qcache_queries_in_cache',
- 'Qcache_free_memory',
- 'Qcache_free_blocks',
- 'Qcache_total_blocks',
- 'Key_blocks_unused',
- 'Key_blocks_used',
- 'Key_blocks_not_flushed',
- 'Key_read_requests',
- 'Key_write_requests',
- 'Key_reads',
- 'Key_writes',
- 'Open_files',
- 'Opened_files',
- 'Binlog_stmt_cache_disk_use',
- 'Binlog_stmt_cache_use',
- 'Connection_errors_accept',
- 'Connection_errors_internal',
- 'Connection_errors_max_connections',
- 'Connection_errors_peer_address',
- 'Connection_errors_select',
- 'Connection_errors_tcpwrap',
- 'wsrep_local_recv_queue',
- 'wsrep_local_send_queue',
- 'wsrep_received',
- 'wsrep_replicated',
- 'wsrep_received_bytes',
- 'wsrep_replicated_bytes',
- 'wsrep_local_bf_aborts',
- 'wsrep_local_cert_failures',
- 'wsrep_flow_control_paused_ns']
-
-def slave_seconds(value):
- try:
- return int(value)
- except (TypeError, ValueError):
- return -1
-
-
-def slave_running(value):
- return 1 if value == 'Yes' else -1
-
-
-SLAVE_STATS = [
- ('Seconds_Behind_Master', slave_seconds),
- ('Slave_SQL_Running', slave_running),
- ('Slave_IO_Running', slave_running)
-]
-
-ORDER = ['net',
- 'queries',
- 'handlers',
- 'table_locks',
- 'join_issues', 'sort_issues',
- 'tmp',
- 'connections', 'connection_errors',
- 'binlog_cache', 'binlog_stmt_cache',
- 'threads', 'thread_cache_misses',
- 'innodb_io', 'innodb_io_ops', 'innodb_io_pending_ops', 'innodb_log', 'innodb_os_log', 'innodb_os_log_io',
- 'innodb_cur_row_lock', 'innodb_rows', 'innodb_buffer_pool_pages', 'innodb_buffer_pool_bytes',
- 'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', 'innodb_buffer_pool_ops',
- 'qcache_ops', 'qcache', 'qcache_freemem', 'qcache_memblocks',
- 'key_blocks', 'key_requests', 'key_disk_ops',
- 'files', 'files_rate', 'slave_behind', 'slave_status',
- 'galera_writesets', 'galera_bytes', 'galera_queue', 'galera_conflicts', 'galera_flow_control']
-
-CHARTS = {
- 'net': {
- 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
- 'lines': [
- ['Bytes_received', 'in', 'incremental', 8, 1024],
- ['Bytes_sent', 'out', 'incremental', -8, 1024]
- ]},
- 'queries': {
- 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
- 'lines': [
- ['Queries', 'queries', 'incremental'],
- ['Questions', 'questions', 'incremental'],
- ['Slow_queries', 'slow_queries', 'incremental']
- ]},
- 'handlers': {
- 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
- 'lines': [
- ['Handler_commit', 'commit', 'incremental'],
- ['Handler_delete', 'delete', 'incremental'],
- ['Handler_prepare', 'prepare', 'incremental'],
- ['Handler_read_first', 'read_first', 'incremental'],
- ['Handler_read_key', 'read_key', 'incremental'],
- ['Handler_read_next', 'read_next', 'incremental'],
- ['Handler_read_prev', 'read_prev', 'incremental'],
- ['Handler_read_rnd', 'read_rnd', 'incremental'],
- ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
- ['Handler_rollback', 'rollback', 'incremental'],
- ['Handler_savepoint', 'savepoint', 'incremental'],
- ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
- ['Handler_update', 'update', 'incremental'],
- ['Handler_write', 'write', 'incremental']
- ]},
- 'table_locks': {
- 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
- 'lines': [
- ['Table_locks_immediate', 'immediate', 'incremental'],
- ['Table_locks_waited', 'waited', 'incremental', -1, 1]
- ]},
- 'join_issues': {
- 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
- 'lines': [
- ['Select_full_join', 'full_join', 'incremental'],
- ['Select_full_range_join', 'full_range_join', 'incremental'],
- ['Select_range', 'range', 'incremental'],
- ['Select_range_check', 'range_check', 'incremental'],
- ['Select_scan', 'scan', 'incremental']
- ]},
- 'sort_issues': {
- 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
- 'lines': [
- ['Sort_merge_passes', 'merge_passes', 'incremental'],
- ['Sort_range', 'range', 'incremental'],
- ['Sort_scan', 'scan', 'incremental']
- ]},
- 'tmp': {
- 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
- 'lines': [
- ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
- ['Created_tmp_files', 'files', 'incremental'],
- ['Created_tmp_tables', 'tables', 'incremental']
- ]},
- 'connections': {
- 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
- 'lines': [
- ['Connections', 'all', 'incremental'],
- ['Aborted_connects', 'aborted', 'incremental']
- ]},
- 'binlog_cache': {
- 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
- 'lines': [
- ['Binlog_cache_disk_use', 'disk', 'incremental'],
- ['Binlog_cache_use', 'all', 'incremental']
- ]},
- 'threads': {
- 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
- 'lines': [
- ['Threads_connected', 'connected', 'absolute'],
- ['Threads_created', 'created', 'incremental'],
- ['Threads_cached', 'cached', 'absolute', -1, 1],
- ['Threads_running', 'running', 'absolute'],
- ]},
- 'thread_cache_misses': {
- 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
- 'lines': [
- ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
- ]},
- 'innodb_io': {
- 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
- 'lines': [
- ['Innodb_data_read', 'read', 'incremental', 1, 1024],
- ['Innodb_data_written', 'write', 'incremental', -1, 1024]
- ]},
- 'innodb_io_ops': {
- 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
- 'lines': [
- ['Innodb_data_reads', 'reads', 'incremental'],
- ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
- ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
- ]},
- 'innodb_io_pending_ops': {
- 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
- 'mysql.innodb_io_pending_ops', 'line'],
- 'lines': [
- ['Innodb_data_pending_reads', 'reads', 'absolute'],
- ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
- ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
- ]},
- 'innodb_log': {
- 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
- 'lines': [
- ['Innodb_log_waits', 'waits', 'incremental'],
- ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
- ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
- ]},
- 'innodb_os_log': {
- 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
- 'lines': [
- ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
- ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
- ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
- ]},
- 'innodb_os_log_io': {
- 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
- 'lines': [
- ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
- ]},
- 'innodb_cur_row_lock': {
- 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
- 'mysql.innodb_cur_row_lock', 'area'],
- 'lines': [
- ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
- ]},
- 'innodb_rows': {
- 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
- 'lines': [
- ['Innodb_rows_inserted', 'inserted', 'incremental'],
- ['Innodb_rows_read', 'read', 'incremental', 1, 1],
- ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
- ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
- ]},
- 'innodb_buffer_pool_pages': {
- 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
- 'mysql.innodb_buffer_pool_pages', 'line'],
- 'lines': [
- ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
- ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
- ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
- ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
- ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
- ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
- ]},
- 'innodb_buffer_pool_bytes': {
- 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
- ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
- ]},
- 'innodb_buffer_pool_read_ahead': {
- 'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
- 'mysql.innodb_buffer_pool_read_ahead', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
- ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
- ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
- ]},
- 'innodb_buffer_pool_reqs': {
- 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
- 'mysql.innodb_buffer_pool_reqs', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
- ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
- ]},
- 'innodb_buffer_pool_ops': {
- 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
- 'mysql.innodb_buffer_pool_ops', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
- ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
- ]},
- 'qcache_ops': {
- 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
- 'lines': [
- ['Qcache_hits', 'hits', 'incremental'],
- ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
- ['Qcache_inserts', 'inserts', 'incremental'],
- ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
- ]},
- 'qcache': {
- 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
- 'lines': [
- ['Qcache_queries_in_cache', 'queries', 'absolute']
- ]},
- 'qcache_freemem': {
- 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
- 'lines': [
- ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
- ]},
- 'qcache_memblocks': {
- 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
- 'lines': [
- ['Qcache_free_blocks', 'free', 'absolute'],
- ['Qcache_total_blocks', 'total', 'absolute']
- ]},
- 'key_blocks': {
- 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
- 'lines': [
- ['Key_blocks_unused', 'unused', 'absolute'],
- ['Key_blocks_used', 'used', 'absolute', -1, 1],
- ['Key_blocks_not_flushed', 'not flushed', 'absolute']
- ]},
- 'key_requests': {
- 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
- 'lines': [
- ['Key_read_requests', 'reads', 'incremental'],
- ['Key_write_requests', 'writes', 'incremental', -1, 1]
- ]},
- 'key_disk_ops': {
- 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
- 'myisam', 'mysql.key_disk_ops', 'area'],
- 'lines': [
- ['Key_reads', 'reads', 'incremental'],
- ['Key_writes', 'writes', 'incremental', -1, 1]
- ]},
- 'files': {
- 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
- 'lines': [
- ['Open_files', 'files', 'absolute']
- ]},
- 'files_rate': {
- 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
- 'lines': [
- ['Opened_files', 'files', 'incremental']
- ]},
- 'binlog_stmt_cache': {
- 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
- 'mysql.binlog_stmt_cache', 'line'],
- 'lines': [
- ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
- ['Binlog_stmt_cache_use', 'all', 'incremental']
- ]},
- 'connection_errors': {
- 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
- 'mysql.connection_errors', 'line'],
- 'lines': [
- ['Connection_errors_accept', 'accept', 'incremental'],
- ['Connection_errors_internal', 'internal', 'incremental'],
- ['Connection_errors_max_connections', 'max', 'incremental'],
- ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
- ['Connection_errors_select', 'select', 'incremental'],
- ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
- ]},
- 'slave_behind': {
- 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
- 'lines': [
- ['Seconds_Behind_Master', 'seconds', 'absolute']
- ]},
- 'slave_status': {
- 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
- 'lines': [
- ['Slave_SQL_Running', 'sql_running', 'absolute'],
- ['Slave_IO_Running', 'io_running', 'absolute']
- ]},
- 'galera_writesets': {
- 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
- 'lines': [
- ['wsrep_received', 'rx', 'incremental'],
- ['wsrep_replicated', 'tx', 'incremental', -1, 1],
- ]},
- 'galera_bytes': {
- 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
- 'lines': [
- ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
- ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
- ]},
- 'galera_queue': {
- 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
- 'lines': [
- ['wsrep_local_recv_queue', 'rx', 'absolute'],
- ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
- ]},
- 'galera_conflicts': {
- 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
- 'lines': [
- ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
- ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
- ]},
- 'galera_flow_control': {
- 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
- 'lines': [
- ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
- ]}
-}
-
-
-class Service(MySQLService):
- def __init__(self, configuration=None, name=None):
- MySQLService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE)
-
- def _get_data(self):
-
- raw_data = self._get_raw_data(description=True)
-
- if not raw_data:
- return None
-
- to_netdata = dict()
-
- if 'global_status' in raw_data:
- global_status = dict(raw_data['global_status'][0])
- for key in GLOBAL_STATS:
- if key in global_status:
- to_netdata[key] = global_status[key]
- if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
- to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created'])
- / float(to_netdata['Connections']) * 10000)
-
- if 'slave_status' in raw_data:
- if raw_data['slave_status'][0]:
- slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
- for key, func in SLAVE_STATS:
- if key in slave_raw_data:
- to_netdata[key] = func(slave_raw_data[key])
- else:
- self.queries.pop('slave_status')
-
- return to_netdata or None
-
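
Note: the Thread_cache_misses value used above is derived, not read directly from SHOW GLOBAL STATUS: the collector stores Threads_created / Connections scaled by 10000, and the 'thread_cache_misses' chart divides by 100, which renders a percentage. A minimal standalone sketch of that arithmetic, with made-up sample counters:

# standalone sketch of the Thread_cache_misses calculation (sample values are hypothetical)
threads_created = 42          # SHOW GLOBAL STATUS -> Threads_created
connections = 10000           # SHOW GLOBAL STATUS -> Connections

stored = round(threads_created / float(connections) * 10000)   # value kept by the collector
displayed_percent = stored / 100.0                              # divisor 100 from the chart definition
print(stored, displayed_percent)                                # 42 0.42 -> 0.42% cache miss rate
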
diff --git a/python.d/nginx.chart.py b/python.d/nginx.chart.py
deleted file mode 100644
index 2e4f0d1b5..000000000
--- a/python.d/nginx.chart.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nginx netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/stub_status'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
-
-CHARTS = {
- 'connections': {
- 'options': [None, 'nginx Active Connections', 'connections', 'active connections',
- 'nginx.connections', 'line'],
- 'lines': [
- ["active"]
- ]},
- 'requests': {
- 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
- 'lines': [
- ["requests", None, 'incremental']
- ]},
- 'connection_status': {
- 'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
- 'nginx.connection_status', 'line'],
- 'lines': [
- ["reading"],
- ["writing"],
- ["waiting", "idle"]
- ]},
- 'connect_rate': {
- 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
- 'nginx.connect_rate', 'line'],
- 'lines': [
- ["accepts", "accepted", "incremental"],
- ["handled", None, "incremental"]
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/stub_status')
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw = self._get_raw_data().split(" ")
- return {'active': int(raw[2]),
- 'requests': int(raw[9]),
- 'reading': int(raw[11]),
- 'writing': int(raw[13]),
- 'waiting': int(raw[15]),
- 'accepts': int(raw[7]),
- 'handled': int(raw[8])}
- except (ValueError, AttributeError):
- return None
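
Note: _get_data() above relies on the fixed token positions of the stub_status payload after splitting on single spaces (newlines stay attached to neighbouring tokens). A standalone sketch with an illustrative stub_status response; the counter values are made up:

# standalone sketch of the stub_status parsing above (sample response is illustrative)
raw = ("Active connections: 291 \n"
       "server accepts handled requests\n"
       " 16630948 16630948 31070465 \n"
       "Reading: 6 Writing: 179 Waiting: 106 \n")

tokens = raw.split(" ")                                    # newlines stay glued to tokens
print(int(tokens[2]))                                      # 291 -> active
print(int(tokens[7]), int(tokens[8]))                      # accepts, handled
print(int(tokens[9]))                                      # 31070465 -> requests
print(int(tokens[11]), int(tokens[13]), int(tokens[15]))   # reading, writing, waiting
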
diff --git a/python.d/nginx_plus.chart.py b/python.d/nginx_plus.chart.py
deleted file mode 100644
index 509ddd380..000000000
--- a/python.d/nginx_plus.chart.py
+++ /dev/null
@@ -1,491 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nginx_plus netdata python.d module
-# Author: Ilya Mashchenko (l2isbad)
-
-import re
-
-from collections import defaultdict
-from copy import deepcopy
-from json import loads
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests_total', 'requests_current',
- 'connections_statistics', 'connections_workers',
- 'ssl_handshakes', 'ssl_session_reuses', 'ssl_memory_usage',
- 'processes']
-
-CHARTS = {
- 'requests_total': {
- 'options': [None, 'Requests Total', 'requests/s',
- 'requests', 'nginx_plus.requests_total', 'line'],
- 'lines': [
- ['requests_total', 'total', 'incremental']
- ]},
- 'requests_current': {
- 'options': [None, 'Requests Current', 'requests',
- 'requests', 'nginx_plus.requests_current', 'line'],
- 'lines': [
- ['requests_current', 'current']
- ]},
- 'connections_statistics': {
- 'options': [None, 'Connections Statistics', 'connections/s',
- 'connections', 'nginx_plus.connections_statistics', 'stacked'],
- 'lines': [
- ['connections_accepted', 'accepted', 'incremental'],
- ['connections_dropped', 'dropped', 'incremental']
- ]},
- 'connections_workers': {
- 'options': [None, 'Workers Statistics', 'workers',
- 'connections', 'nginx_plus.connections_workers', 'stacked'],
- 'lines': [
- ['connections_idle', 'idle'],
- ['connections_active', 'active']
- ]},
- 'ssl_handshakes': {
- 'options': [None, 'SSL Handshakes', 'handshakes/s',
- 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
- 'lines': [
- ['ssl_handshakes', 'successful', 'incremental'],
- ['ssl_handshakes_failed', 'failed', 'incremental']
- ]},
- 'ssl_session_reuses': {
- 'options': [None, 'Session Reuses', 'sessions/s',
- 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
- 'lines': [
- ['ssl_session_reuses', 'reused', 'incremental']
- ]},
- 'ssl_memory_usage': {
- 'options': [None, 'Memory Usage', '%',
- 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
- 'lines': [
- ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
- ]},
- 'processes': {
- 'options': [None, 'Processes', 'processes',
- 'processes', 'nginx_plus.processes', 'line'],
- 'lines': [
- ['processes_respawned', 'respawned']
- ]}
-}
-
-
-def cache_charts(cache):
- family = 'cache {0}'.format(cache.real_name)
- charts = OrderedDict()
-
- charts['{0}_traffic'.format(cache.name)] = {
- 'options': [None, 'Traffic', 'KB', family,
- 'nginx_plus.cache_traffic', 'stacked'],
- 'lines': [
- ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
- ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
- ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
- ]
- }
- charts['{0}_memory_usage'.format(cache.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.cache_memory_usage', 'area'],
- 'lines': [
- ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
- ]
- }
- return charts
-
-
-def web_zone_charts(wz):
- charts = OrderedDict()
- family = 'web zone {name}'.format(name=wz.real_name)
-
- # Processing
- charts['zone_{name}_processing'.format(name=wz.name)] = {
- 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family,
- 'nginx_plus.web_zone_processing', 'line'],
- 'lines': [
- ['_'.join([wz.name, 'processing']), 'processing']
- ]
- }
- # Requests
- charts['zone_{name}_requests'.format(name=wz.name)] = {
- 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family,
- 'nginx_plus.web_zone_requests', 'line'],
- 'lines': [
- ['_'.join([wz.name, 'requests']), 'requests', 'incremental']
- ]
- }
- # Response Codes
- charts['zone_{name}_responses'.format(name=wz.name)] = {
- 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family,
- 'nginx_plus.web_zone_responses', 'stacked'],
- 'lines': [
- ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'],
- ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'],
- ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'],
- ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'],
- ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental']
- ]
- }
- # Traffic
- charts['zone_{name}_net'.format(name=wz.name)] = {
- 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family,
- 'nginx_plus.zone_net', 'area'],
- 'lines': [
- ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000],
- ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000]
- ]
- }
- return charts
-
-
-def web_upstream_charts(wu):
- def dimensions(value, a='absolute', m=1, d=1):
- dims = list()
- for p in wu:
- dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d])
- return dims
-
- charts = OrderedDict()
- family = 'web upstream {name}'.format(name=wu.real_name)
-
- # Requests
- charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
- 'options': [None, 'Peers Requests', 'requests/s', family,
- 'nginx_plus.web_upstream_requests', 'line'],
- 'lines': dimensions('requests', 'incremental')
- }
- # Responses Codes
- charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = {
- 'options': [None, 'All Peers Responses', 'responses/s', family,
- 'nginx_plus.web_upstream_all_responses', 'stacked'],
- 'lines': [
- ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'],
- ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'],
- ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'],
- ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'],
- ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'],
- ]
- }
- for peer in wu:
- charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.id)] = {
- 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
- 'nginx_plus.web_upstream_peer_responses', 'stacked'],
- 'lines': [
- ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'],
- ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'],
- ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'],
- ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'],
- ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental']
- ]
- }
- # Connections
- charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections', 'active', family,
- 'nginx_plus.web_upstream_connections', 'line'],
- 'lines': dimensions('active')
- }
- charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections Usage', '%', family,
- 'nginx_plus.web_upstream_connections_usage', 'line'],
- 'lines': dimensions('connections_usage', d=100)
- }
- # Traffic
- charts['web_upstream_{0}_all_net'.format(wu.name)] = {
- 'options': [None, 'All Peers Traffic', 'kilobits/s', family,
- 'nginx_plus.web_upstream_all_net', 'area'],
- 'lines': [
- ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
- ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
- ]
- }
- for peer in wu:
- charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.id)] = {
- 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
- 'nginx_plus.web_upstream_peer_traffic', 'area'],
- 'lines': [
- ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000],
- ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000]
- ]
- }
- # Response Time
- for peer in wu:
- charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.id)] = {
- 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
- 'nginx_plus.web_upstream_peer_timings', 'line'],
- 'lines': [
- ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
- ['_'.join([wu.name, peer.server, 'response_time']), 'response']
- ]
- }
- # Memory Usage
- charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.web_upstream_memory_usage', 'area'],
- 'lines': [
- ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
- ]
- }
- # State
- charts['web_upstream_{name}_status'.format(name=wu.name)] = {
- 'options': [None, 'Peers Status', 'state', family,
- 'nginx_plus.web_upstream_status', 'line'],
- 'lines': dimensions('state')
- }
- # Downtime
- charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
- 'options': [None, 'Peers Downtime', 'seconds', family,
- 'nginx_plus.web_upstream_peer_downtime', 'line'],
- 'lines': dimensions('downtime', d=1000)
- }
-
- return charts
-
-
-METRICS = dict(
- SERVER=[
- 'processes.respawned',
- 'connections.accepted',
- 'connections.dropped',
- 'connections.active',
- 'connections.idle',
- 'ssl.handshakes',
- 'ssl.handshakes_failed',
- 'ssl.session_reuses',
- 'requests.total',
- 'requests.current',
- 'slabs.SSL.pages.free',
- 'slabs.SSL.pages.used'
- ],
- WEB_ZONE=[
- 'processing',
- 'requests',
- 'responses.1xx',
- 'responses.2xx',
- 'responses.3xx',
- 'responses.4xx',
- 'responses.5xx',
- 'discarded',
- 'received',
- 'sent'
- ],
- WEB_UPSTREAM_PEER=[
- 'id',
- 'server',
- 'name',
- 'state',
- 'active',
- 'max_conns',
- 'requests',
- 'header_time', # alive only
- 'response_time', # alive only
- 'responses.1xx',
- 'responses.2xx',
- 'responses.3xx',
- 'responses.4xx',
- 'responses.5xx',
- 'sent',
- 'received',
- 'downtime'
- ],
- WEB_UPSTREAM_SUMMARY=[
- 'responses.1xx',
- 'responses.2xx',
- 'responses.3xx',
- 'responses.4xx',
- 'responses.5xx',
- 'sent',
- 'received'
- ],
- CACHE=[
- 'hit.bytes', # served
- 'miss.bytes_written', # written
- 'miss.bytes' # bypass
-
- ]
-)
-
-BAD_SYMBOLS = re.compile(r'[:/.-]+')
-
-
-class Cache:
- key = 'caches'
- charts = cache_charts
-
- def __init__(self, **kw):
- self.real_name = kw['name']
- self.name = BAD_SYMBOLS.sub('_', self.real_name)
-
- def memory_usage(self, data):
- used = data['slabs'][self.real_name]['pages']['used']
- free = data['slabs'][self.real_name]['pages']['free']
- return used / float(free + used) * 1e4
-
- def get_data(self, raw_data):
- zone_data = raw_data['caches'][self.real_name]
- data = parse_json(zone_data, METRICS['CACHE'])
- data['memory_usage'] = self.memory_usage(raw_data)
- return dict(('_'.join([self.name, k]), v) for k, v in data.items())
-
-
-class WebZone:
- key = 'server_zones'
- charts = web_zone_charts
-
- def __init__(self, **kw):
- self.real_name = kw['name']
- self.name = BAD_SYMBOLS.sub('_', self.real_name)
-
- def get_data(self, raw_data):
- zone_data = raw_data['server_zones'][self.real_name]
- data = parse_json(zone_data, METRICS['WEB_ZONE'])
- return dict(('_'.join([self.name, k]), v) for k, v in data.items())
-
-
-class WebUpstream:
- key = 'upstreams'
- charts = web_upstream_charts
-
- def __init__(self, **kw):
- self.real_name = kw['name']
- self.name = BAD_SYMBOLS.sub('_', self.real_name)
- self.peers = OrderedDict()
-
- peers = kw['response']['upstreams'][self.real_name]['peers']
- for peer in peers:
- self.add_peer(peer['id'], peer['server'])
-
- def __iter__(self):
- return iter(self.peers.values())
-
- def add_peer(self, idx, server):
- peer = WebUpstreamPeer(idx, server)
- self.peers[peer.real_server] = peer
- return peer
-
- def peers_stats(self, peers):
- data = dict()
- for peer in self.peers.values():
- if not peer.active:
- continue
- try:
- data.update(peer.get_data(peers[peer.id]))
- except KeyError:
- peer.active = False
- return data
-
- def memory_usage(self, data):
- used = data['slabs'][self.real_name]['pages']['used']
- free = data['slabs'][self.real_name]['pages']['free']
- return used / float(free + used) * 1e4
-
- def summary_stats(self, data):
- rv = defaultdict(int)
- for metric in METRICS['WEB_UPSTREAM_SUMMARY']:
- for peer in self.peers.values():
- if peer.active:
- metric = '_'.join(metric.split('.'))
- rv[metric] += data['_'.join([peer.server, metric])]
- return rv
-
- def get_data(self, raw_data):
- data = dict()
- peers = raw_data['upstreams'][self.real_name]['peers']
- data.update(self.peers_stats(peers))
- data.update(self.summary_stats(data))
- data['memory_usage'] = self.memory_usage(raw_data)
- return dict(('_'.join([self.name, k]), v) for k, v in data.items())
-
-
-class WebUpstreamPeer:
- def __init__(self, idx, server):
- self.id = idx
- self.real_server = server
- self.server = BAD_SYMBOLS.sub('_', self.real_server)
- self.active = True
-
- def get_data(self, raw):
- data = dict(header_time=0, response_time=0, max_conns=0)
- data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER']))
- data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4
- data['state'] = int(data['state'] == 'up')
- return dict(('_'.join([self.server, k]), v) for k, v in data.items())
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = list(ORDER)
- self.definitions = deepcopy(CHARTS)
- self.objects = dict()
-
- def check(self):
- if not self.url:
- self.error('URL is not defined')
- return None
-
- self._manager = self._build_manager()
- if not self._manager:
- return None
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- response = loads(raw_data)
- except ValueError:
- return None
-
- for obj_cls in [WebZone, WebUpstream, Cache]:
- for obj_name in response.get(obj_cls.key, list()):
- obj = obj_cls(name=obj_name, response=response)
- self.objects[obj.real_name] = obj
- charts = obj_cls.charts(obj)
- for chart in charts:
- self.order.append(chart)
- self.definitions[chart] = charts[chart]
-
- return bool(self.objects)
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
- response = loads(raw_data)
-
- data = parse_json(response, METRICS['SERVER'])
- data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4
-
- for obj in self.objects.values():
- if obj.real_name in response[obj.key]:
- data.update(obj.get_data(response))
-
- return data
-
-
-def parse_json(raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError:
- continue
- data['_'.join(metrics_list)] = value
- return data
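
Note: parse_json() above flattens nested JSON into underscore-joined keys and silently skips missing paths, which is why absent API sections simply produce no dimensions. A standalone re-statement of the same logic with an illustrative payload:

# standalone sketch of the parse_json() flattening above (sample payload is illustrative)
def flatten(raw_data, metrics):
    data = {}
    for metric in metrics:
        value = raw_data
        parts = metric.split('.')
        try:
            for part in parts:
                value = value[part]
        except KeyError:
            continue                      # missing path -> metric is skipped
        data['_'.join(parts)] = value
    return data

sample = {'connections': {'accepted': 14, 'dropped': 0}, 'ssl': {'handshakes': 3}}
print(flatten(sample, ['connections.accepted', 'connections.dropped', 'ssl.handshakes', 'requests.total']))
# {'connections_accepted': 14, 'connections_dropped': 0, 'ssl_handshakes': 3}
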
diff --git a/python.d/nsd.chart.py b/python.d/nsd.chart.py
deleted file mode 100644
index 499dfda2e..000000000
--- a/python.d/nsd.chart.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: NSD `nsd-control stats_noreset` netdata python.d module
-# Author: <383c57 at gmail.com>
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 5
-update_every = 30
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
-
-CHARTS = {
- 'queries': {
- 'options': [
- None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
- 'lines': [
- ['num_queries', 'queries', 'incremental'],]},
- 'zones': {
- 'options': [
- None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
- 'lines': [
- ['zone_master', 'master', 'absolute'],
- ['zone_slave', 'slave', 'absolute'],]},
- 'protocol': {
- 'options': [
- None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
- 'lines': [
- ['num_udp', 'udp', 'incremental'],
- ['num_udp6', 'udp6', 'incremental'],
- ['num_tcp', 'tcp', 'incremental'],
- ['num_tcp6', 'tcp6', 'incremental'],]},
- 'type': {
- 'options': [
- None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
- 'lines': [
- ['num_type_A', 'A', 'incremental'],
- ['num_type_NS', 'NS', 'incremental'],
- ['num_type_CNAME', 'CNAME', 'incremental'],
- ['num_type_SOA', 'SOA', 'incremental'],
- ['num_type_PTR', 'PTR', 'incremental'],
- ['num_type_HINFO', 'HINFO', 'incremental'],
- ['num_type_MX', 'MX', 'incremental'],
- ['num_type_NAPTR', 'NAPTR', 'incremental'],
- ['num_type_TXT', 'TXT', 'incremental'],
- ['num_type_AAAA', 'AAAA', 'incremental'],
- ['num_type_SRV', 'SRV', 'incremental'],
- ['num_type_TYPE255', 'ANY', 'incremental'],]},
- 'transfer': {
- 'options': [
- None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
- 'lines': [
- ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
- ['num_type_TYPE252', 'AXFR', 'incremental'],]},
- 'rcode': {
- 'options': [
- None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
- 'lines': [
- ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
- ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
- ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
- ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
- ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
- ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
- ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(
- self, configuration=configuration, name=name)
- self.command = "nsd-control stats_noreset"
- self.order = ORDER
- self.definitions = CHARTS
- self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
-
- def _get_data(self):
- lines = self._get_raw_data()
- if not lines:
- return None
-
- r = self.regex
- stats = dict((k.replace('.', '_'), int(v))
- for k, v in r.findall(''.join(lines)))
- stats.setdefault('num_opcode_NOTIFY', 0)
- stats.setdefault('num_type_TYPE252', 0)
- stats.setdefault('num_type_TYPE255', 0)
- return stats
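
Note: _get_data() above turns the key=value output of `nsd-control stats_noreset` into a flat dict, replacing dots with underscores; counters nsd omits when zero are defaulted with setdefault(). A standalone sketch with illustrative sample lines:

# standalone sketch of the stats parsing above (sample nsd-control output is illustrative)
import re

regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
lines = ['num.queries=123\n', 'num.udp=100\n', 'zone.master=2\n', 'zone.slave=1\n']

stats = dict((k.replace('.', '_'), int(v)) for k, v in regex.findall(''.join(lines)))
stats.setdefault('num_opcode_NOTIFY', 0)            # keys nsd omits when the counter is zero
print(stats['num_queries'], stats['zone_master'])   # 123 2
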
diff --git a/python.d/ntpd.chart.py b/python.d/ntpd.chart.py
deleted file mode 100644
index 05209da87..000000000
--- a/python.d/ntpd.chart.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: ntpd netdata python.d module
-# Author: Sven Mäder (rda0)
-# Author: Ilya Mashchenko (l2isbad)
-
-import struct
-import re
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
-
-# NTP Control Message Protocol constants
-MODE = 6
-HEADER_FORMAT = '!BBHHHHH'
-HEADER_LEN = 12
-OPCODES = {
- 'readstat': 1,
- 'readvar': 2
-}
-
-# Maximal dimension precision
-PRECISION = 1000000
-
-# Static charts
-ORDER = [
- 'sys_offset',
- 'sys_jitter',
- 'sys_frequency',
- 'sys_wander',
- 'sys_rootdelay',
- 'sys_rootdisp',
- 'sys_stratum',
- 'sys_tc',
- 'sys_precision',
- 'peer_offset',
- 'peer_delay',
- 'peer_dispersion',
- 'peer_jitter',
- 'peer_xleave',
- 'peer_rootdelay',
- 'peer_rootdisp',
- 'peer_stratum',
- 'peer_hmode',
- 'peer_pmode',
- 'peer_hpoll',
- 'peer_ppoll',
- 'peer_precision'
-]
-
-CHARTS = {
- 'sys_offset': {
- 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
- 'lines': [
- ['offset', 'offset', 'absolute', 1, PRECISION]
- ]},
- 'sys_jitter': {
- 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
- 'lines': [
- ['sys_jitter', 'system', 'absolute', 1, PRECISION],
- ['clk_jitter', 'clock', 'absolute', 1, PRECISION]
- ]},
- 'sys_frequency': {
- 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
- 'lines': [
- ['frequency', 'frequency', 'absolute', 1, PRECISION]
- ]},
- 'sys_wander': {
- 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
- 'lines': [
- ['clk_wander', 'clock', 'absolute', 1, PRECISION]
- ]},
- 'sys_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
- 'ntpd.sys_rootdelay', 'area'],
- 'lines': [
- ['rootdelay', 'delay', 'absolute', 1, PRECISION]
- ]},
- 'sys_rootdisp': {
- 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
- 'ntpd.sys_rootdisp', 'area'],
- 'lines': [
- ['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
- ]},
- 'sys_stratum': {
- 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
- 'lines': [
- ['stratum', 'stratum', 'absolute', 1, PRECISION]
- ]},
- 'sys_tc': {
- 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
- 'lines': [
- ['tc', 'current', 'absolute', 1, PRECISION],
- ['mintc', 'minimum', 'absolute', 1, PRECISION]
- ]},
- 'sys_precision': {
- 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
- 'lines': [
- ['precision', 'precision', 'absolute', 1, PRECISION]
- ]}
-}
-
-PEER_CHARTS = {
- 'peer_offset': {
- 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
- 'lines': [
- ]},
- 'peer_delay': {
- 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
- 'lines': [
- ]},
- 'peer_dispersion': {
- 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
- 'lines': [
- ]},
- 'peer_jitter': {
- 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
- 'lines': [
- ]},
- 'peer_xleave': {
- 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
- 'lines': [
- ]},
- 'peer_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
- 'ntpd.peer_rootdelay', 'line'],
- 'lines': [
- ]},
- 'peer_rootdisp': {
- 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers',
- 'ntpd.peer_rootdisp', 'line'],
- 'lines': [
- ]},
- 'peer_stratum': {
- 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
- 'lines': [
- ]},
- 'peer_hmode': {
- 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
- 'lines': [
- ]},
- 'peer_pmode': {
- 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
- 'lines': [
- ]},
- 'peer_hpoll': {
- 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
- 'lines': [
- ]},
- 'peer_ppoll': {
- 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
- 'lines': [
- ]},
- 'peer_precision': {
- 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
- 'lines': [
- ]}
-}
-
-
-class Base:
- regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)')
-
- @staticmethod
- def get_header(associd=0, operation='readvar'):
- """
- Constructs the NTP Control Message header:
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |LI | VN |Mode |R|E|M| OpCode | Sequence Number |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Status | Association ID |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Offset | Count |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- """
- version = 2
- sequence = 1
- status = 0
- offset = 0
- count = 0
- header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation],
- sequence, status, associd, offset, count)
- return header
-
-
-class System(Base):
- def __init__(self):
- self.request = self.get_header()
-
- def get_data(self, raw):
- """
- Extracts key=value pairs with float/integer from ntp response packet data.
- """
- data = dict()
- for key, value in self.regex.findall(raw):
- data[key] = float(value) * PRECISION
- return data
-
-
-class Peer(Base):
- def __init__(self, idx, name):
- self.id = idx
- self.real_name = name
- self.name = name.replace('.', '_')
- self.request = self.get_header(self.id)
-
- def get_data(self, raw):
- """
- Extracts key=value pairs with float/integer from ntp response packet data.
- """
- data = dict()
- for key, value in self.regex.findall(raw):
- dimension = '_'.join([self.name, key])
- data[dimension] = float(value) * PRECISION
- return data
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = list(ORDER)
- self.definitions = dict(CHARTS)
-
- self.port = 'ntp'
- self.dgram_socket = True
- self.system = System()
- self.peers = dict()
- self.request = str()
- self.retries = 0
- self.show_peers = self.configuration.get('show_peers', False)
- self.peer_rescan = self.configuration.get('peer_rescan', 60)
-
- if self.show_peers:
- self.definitions.update(PEER_CHARTS)
-
- def check(self):
- """
- Checks if we can get valid systemvars.
- If not, returns None to disable module.
- """
- self._parse_config()
-
- peer_filter = self.configuration.get('peer_filter', r'127\..*')
- try:
- self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter))
- except re.error as error:
- self.error('Compile pattern error (peer_filter) : {0}'.format(error))
- return None
-
- self.request = self.system.request
- raw_systemvars = self._get_raw_data()
-
- if not self.system.get_data(raw_systemvars):
- return None
-
- return True
-
- def get_data(self):
- """
- Gets systemvars data on each update.
- Gets peervars data for all peers on each update.
- """
- data = dict()
-
- self.request = self.system.request
- raw = self._get_raw_data()
- if not raw:
- return None
-
- data.update(self.system.get_data(raw))
-
- if not self.show_peers:
- return data
-
- if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8:
- self.find_new_peers()
-
- for peer in self.peers.values():
- self.request = peer.request
- peer_data = peer.get_data(self._get_raw_data())
- if peer_data:
- data.update(peer_data)
- else:
- self.retries += 1
-
- return data
-
- def find_new_peers(self):
- new_peers = dict((p.real_name, p) for p in self.get_peers())
- if new_peers:
-
- peers_to_remove = set(self.peers) - set(new_peers)
- peers_to_add = set(new_peers) - set(self.peers)
-
- for peer_name in peers_to_remove:
- self.hide_old_peer_from_charts(self.peers[peer_name])
- del self.peers[peer_name]
-
- for peer_name in peers_to_add:
- self.add_new_peer_to_charts(new_peers[peer_name])
-
- self.peers.update(new_peers)
- self.retries = 0
-
- def add_new_peer_to_charts(self, peer):
- for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
- dim_id = peer.name + chart_id[4:]
- if dim_id not in self.charts[chart_id]:
- self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION])
- else:
- self.charts[chart_id].hide_dimension(dim_id, reverse=True)
-
- def hide_old_peer_from_charts(self, peer):
- for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
- dim_id = peer.name + chart_id[4:]
- self.charts[chart_id].hide_dimension(dim_id)
-
- def get_peers(self):
- self.request = Base.get_header(operation='readstat')
-
- raw_data = self._get_raw_data(raw=True)
- if not raw_data:
- return list()
-
- peer_ids = self.get_peer_ids(raw_data)
- if not peer_ids:
- return list()
-
- new_peers = list()
- for peer_id in peer_ids:
- self.request = Base.get_header(peer_id)
- raw_peer_data = self._get_raw_data()
- if not raw_peer_data:
- continue
- srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data)
- if not srcadr:
- continue
- srcadr = srcadr.group(2)
- if self.peer_filter.search(srcadr):
- continue
- stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data)
- if not stratum:
- continue
- if int(stratum.group(2)) > 15:
- continue
-
- new_peer = Peer(idx=peer_id, name=srcadr)
- new_peers.append(new_peer)
- return new_peers
-
- def get_peer_ids(self, res):
- """
- Unpack the NTP Control Message header
- Get data length from header
- Get list of association ids returned in the readstat response
- """
-
- try:
- count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6]
- except struct.error as error:
- self.error('error unpacking header: {0}'.format(error))
- return None
- if not count:
- self.error('empty data field in NTP control packet')
- return None
-
- data_end = HEADER_LEN + count
- data = res[HEADER_LEN:data_end]
- data_format = ''.join(['!', 'H' * int(count / 2)])
- try:
- peer_ids = list(struct.unpack(data_format, data))[::2]
- except struct.error as error:
- self.error('error unpacking data: {0}'.format(error))
- return None
- return peer_ids
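
Note: get_header() above packs a 12-byte NTP mode-6 control-message header matching the ASCII diagram in its docstring. A standalone sketch of the same packing (Python 3 assumed for bytes.hex()):

# standalone sketch of the mode-6 header packing above (constants follow this module)
import struct

HEADER_FORMAT = '!BBHHHHH'   # LI/VN/Mode, R|E|M/OpCode, sequence, status, assoc id, offset, count
MODE = 6
version = 2

header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), 2, 1, 0, 0, 0, 0)  # opcode 2 = readvar
print(len(header), header.hex())   # 12 160200010000000000000000
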
diff --git a/python.d/ovpn_status_log.chart.py b/python.d/ovpn_status_log.chart.py
deleted file mode 100644
index 519c77fa3..000000000
--- a/python.d/ovpn_status_log.chart.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: openvpn status log netdata python.d module
-# Author: l2isbad
-
-from re import compile as r_compile
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-update_every = 10
-
-ORDER = ['users', 'traffic']
-CHARTS = {
- 'users': {
- 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
- 'lines': [
- ['users', None, 'absolute'],
- ]},
- 'traffic': {
- 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
- 'lines': [
- ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
- ]},
-
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.log_path = self.configuration.get('log_path')
- self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'),
- static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'))
-
- def check(self):
- if not (self.log_path and isinstance(self.log_path, str)):
- self.error("'log_path' is not defined")
- return False
-
- data = self._get_raw_data()
- if not data:
- self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
- return None
-
- found = None
- for row in data:
- if 'ROUTING' in row:
- self.get_data = self.get_data_tls
- found = True
- break
- elif 'STATISTICS' in row:
- self.get_data = self.get_data_static_key
- found = True
- break
- if found:
- return True
-        self.error("Failed to parse openvpn log file")
- return False
-
- def _get_raw_data(self):
- """
- Open log file
- :return: str
- """
-
- try:
- with open(self.log_path) as log:
- raw_data = log.readlines() or None
- except OSError:
- return None
- else:
- return raw_data
-
- def get_data_static_key(self):
- """
- Parse openvpn-status log file.
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- data = dict(bytes_in=0, bytes_out=0)
-
- for row in raw_data:
- match = self.regex['static_key'].search(row)
- if match:
- match = match.groupdict()
- if match['direction'] == 'read':
- data['bytes_in'] += int(match['bytes'])
- else:
- data['bytes_out'] += int(match['bytes'])
-
- return data or None
-
- def get_data_tls(self):
- """
- Parse openvpn-status log file.
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- data = dict(users=0, bytes_in=0, bytes_out=0)
- for row in raw_data:
- row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
- match = self.regex['tls'].search(row)
- if match:
- match = match.groupdict()
- data['users'] += 1
- data['bytes_in'] += int(match['bytes_in'])
- data['bytes_out'] += int(match['bytes_out'])
-
- return data or None
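
Note: get_data_tls() above extracts per-client byte counters from client-list rows of the status log after converting commas to spaces. A standalone sketch with an illustrative row (the address and counters are made up):

# standalone sketch of the tls-mode log parsing above (sample status-log row is illustrative)
import re

tls_regex = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
row = 'client1,203.0.113.7:53162,139583,710885,Thu Nov  1 12:00:00 2018'

row = ' '.join(row.split(','))                              # commas -> spaces, as in get_data_tls()
match = tls_regex.search(row)
print(match.group('bytes_in'), match.group('bytes_out'))    # 139583 710885
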
diff --git a/python.d/phpfpm.chart.py b/python.d/phpfpm.chart.py
deleted file mode 100644
index ea7a9a7e6..000000000
--- a/python.d/phpfpm.chart.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: PHP-FPM netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-import json
-import re
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/status?full&json'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-
-POOL_INFO = [
- ('active processes', 'active'),
- ('max active processes', 'maxActive'),
- ('idle processes', 'idle'),
- ('accepted conn', 'requests'),
- ('max children reached', 'reached'),
- ('slow requests', 'slow')
-]
-
-PER_PROCESS_INFO = [
- ('request duration', 'ReqDur'),
- ('last request cpu', 'ReqCpu'),
- ('last request memory', 'ReqMem')
-]
-
-
-def average(collection):
- return sum(collection, 0.0) / max(len(collection), 1)
-
-
-CALC = [
- ('min', min),
- ('max', max),
- ('avg', average)
-]
-
-ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
-
-CHARTS = {
- 'connections': {
- 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
- 'line'],
- 'lines': [
- ['active'],
- ['maxActive', 'max active'],
- ['idle']
- ]},
- 'requests': {
- 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
- 'lines': [
- ['requests', None, 'incremental']
- ]},
- 'performance': {
- 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
- 'lines': [
- ['reached', 'max children reached'],
- ['slow', 'slow requests']
- ]},
- 'request_duration': {
- 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
- 'line'],
- 'lines': [
- ['minReqDur', 'min', 'absolute', 1, 1000],
- ['maxReqDur', 'max', 'absolute', 1, 1000],
- ['avgReqDur', 'avg', 'absolute', 1, 1000]
- ]},
- 'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
- 'lines': [
- ['minReqCpu', 'min'],
- ['maxReqCpu', 'max'],
- ['avgReqCpu', 'avg']
- ]},
- 'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
- 'lines': [
- ['minReqMem', 'min', 'absolute', 1, 1024],
- ['maxReqMem', 'max', 'absolute', 1, 1024],
- ['avgReqMem', 'avg', 'absolute', 1, 1024]
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/status?full&json')
- self.order = ORDER
- self.definitions = CHARTS
- self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
- self.json = '&json' in self.url or '?json' in self.url
- self.json_full = self.url.endswith(('?full&json', '?json&full'))
- self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
- for metric, p_name in PER_PROCESS_INFO])
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
-
- # Per Pool info: active connections, requests and performance charts
- to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
-
- # Per Process Info: duration, cpu and memory charts (min, max, avg)
- if self.json_full:
- p_info = dict()
-            to_netdata.update(self.if_all_processes_running)  # defaults used when no process is Idle
-            # Per-process metrics are only meaningful for Idle processes: PHP-FPM calculates them
-            # once request processing has terminated, so busy processes always report 0
- for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
- p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
-
- if p_info:
- for new_name in PER_PROCESS_INFO:
- for name, func in CALC:
- to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k])
-
- return to_netdata or None
-
-
-def fetch_data_(raw_data, metrics_list, pid=''):
- """
- :param raw_data: dict
- :param metrics_list: list
- :param pid: str
- :return: dict
- """
- result = dict()
- for metric, new_name in metrics_list:
- if metric in raw_data:
- result[new_name + pid] = float(raw_data[metric])
- return result
-
-
-def parse_raw_data_(is_json, regex, raw_data):
- """
- :param is_json: bool
- :param regex: compiled regular expr
-    :param raw_data: str
- :return: dict
- """
- if is_json:
- try:
- return json.loads(raw_data)
- except ValueError:
- return dict()
- else:
- raw_data = ' '.join(raw_data.split())
- return dict(regex.findall(raw_data))
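
Note: _get_data() above reduces the per-process values to min/max/avg dimensions via CALC. A standalone sketch with illustrative per-process request durations (keys are metric name plus pid, as built by fetch_data_()):

# standalone sketch of the per-process min/max/avg aggregation above (sample values are illustrative)
def average(collection):
    return sum(collection, 0.0) / max(len(collection), 1)

per_process = {'ReqDur123': 1500.0, 'ReqDur124': 900.0, 'ReqDur125': 2100.0}   # key = metric + pid

calc = [('min', min), ('max', max), ('avg', average)]
result = dict((name + 'ReqDur', func([v for k, v in per_process.items() if 'ReqDur' in k]))
              for name, func in calc)
print(result)   # {'minReqDur': 900.0, 'maxReqDur': 2100.0, 'avgReqDur': 1500.0}
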
diff --git a/python.d/portcheck.chart.py b/python.d/portcheck.chart.py
deleted file mode 100644
index 0a312210d..000000000
--- a/python.d/portcheck.chart.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: simple port check netdata python.d module
-# Original Author: ccremer (github.com/ccremer)
-
-import socket
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
-
-PORT_LATENCY = 'connect'
-
-PORT_SUCCESS = 'success'
-PORT_TIMEOUT = 'timeout'
-PORT_FAILED = 'no_connection'
-
-ORDER = ['latency', 'status']
-
-CHARTS = {
- 'latency': {
- 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
- 'lines': [
- [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
- ]
- },
- 'status': {
- 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
- 'lines': [
- [PORT_SUCCESS, 'success', 'absolute'],
- [PORT_TIMEOUT, 'timeout', 'absolute'],
- [PORT_FAILED, 'no connection', 'absolute']
- ]}
-}
-
-
-# Not deriving from SocketService, too much is different
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host')
- self.port = self.configuration.get('port')
- self.timeout = self.configuration.get('timeout', 1)
-
- def check(self):
- """
-        Check that host and port are configured and that the port is a valid integer
- :return: boolean
- """
- if self.host is None or self.port is None:
- self.error("Host or port missing")
- return False
- if not isinstance(self.port, int):
-            self.error('"port" is not an integer. Specify a numerical value, not a service name.')
- return False
-
- self.debug("Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s".format(
- host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
- ))
- # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
- # the beginning)
- return True
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- data = dict()
- data[PORT_SUCCESS] = 0
- data[PORT_TIMEOUT] = 0
- data[PORT_FAILED] = 0
-
- success = False
- try:
- for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
- # use first working socket
- sock = self._create_socket(socket_config)
- if sock is not None:
- self._connect2socket(data, socket_config, sock)
- self._disconnect(sock)
- success = True
- break
- except socket.gaierror as error:
- self.debug('Failed to connect to "{host}:{port}", error: {error}'.format(
- host=self.host, port=self.port, error=error
- ))
-
- # We could not connect
- if not success:
- data[PORT_FAILED] = 1
-
- return data
-
- def _create_socket(self, socket_config):
- af, sock_type, proto, canon_name, sa = socket_config
- try:
- self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- sock = socket.socket(af, sock_type, proto)
- sock.settimeout(self.timeout)
- return sock
- except socket.error as error:
- self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=sa[1], error=error
- ))
- return None
-
- def _connect2socket(self, data, socket_config, sock):
- """
- Connect to a socket, passing the result of getaddrinfo()
- :return: dict
- """
-
- af, sock_type, proto, canon_name, sa = socket_config
- port = str(sa[1])
- try:
- self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
- start = time()
- sock.connect(sa)
- diff = time() - start
- self.debug('Connected to "{address}", port {port}, latency {latency}'.format(
- address=sa[0], port=port, latency=diff
- ))
-            # never report a negative latency; 0.0 would mean a failed connection (handy for 3rd-party APIs)
- data[PORT_LATENCY] = max(round(diff * 10000), 0)
- data[PORT_SUCCESS] = 1
-
- except socket.timeout as error:
- self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=port, error=error
- ))
- data[PORT_TIMEOUT] = 1
-
- except socket.error as error:
- self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=port, error=error
- ))
- data[PORT_FAILED] = 1
-
- def _disconnect(self, sock):
- """
- Close socket connection
- :return:
- """
- if sock is not None:
- try:
- self.debug('Closing socket')
- sock.shutdown(2) # 0 - read, 1 - write, 2 - all
- sock.close()
- except socket.error:
- pass
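
Note: the latency value stored in _connect2socket() is in units of 0.1 ms; the 'latency' chart's multiplier 100 and divisor 1000 convert it back to milliseconds. A standalone sketch with a made-up elapsed time:

# standalone sketch of the latency scaling above (the elapsed time is a made-up sample)
elapsed = 0.00347                          # seconds measured around sock.connect()

stored = max(round(elapsed * 10000), 0)    # collector value, in units of 0.1 ms
displayed_ms = stored * 100 / 1000.0       # multiplier/divisor from the 'latency' chart line
print(stored, displayed_ms)                # 35 3.5 -> shown as 3.5 ms
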
diff --git a/python.d/postfix.chart.py b/python.d/postfix.chart.py
deleted file mode 100644
index a2129e4be..000000000
--- a/python.d/postfix.chart.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: postfix netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails', 'qsize']
-
-CHARTS = {
- 'qemails': {
- 'options': [None, "Postfix Queue Emails", "emails", 'queue', 'postfix.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]},
- 'qsize': {
- 'options': [None, "Postfix Queue Emails Size", "emails size in KB", 'queue', 'postfix.qsize', 'area'],
- 'lines': [
- ["size", None, 'absolute']
- ]}
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "postqueue -p"
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- raw = self._get_raw_data()[-1].split(' ')
- if raw[0] == 'Mail' and raw[1] == 'queue':
- return {'emails': 0,
- 'size': 0}
-
- return {'emails': raw[4],
- 'size': raw[1]}
- except (ValueError, AttributeError):
- return None
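
Note: _get_data() above reads fixed token positions from the last line of `postqueue -p`; an empty queue prints "Mail queue is empty" and is reported as zeros. A standalone sketch with an illustrative summary line:

# standalone sketch of the postqueue summary parsing above (the summary line is illustrative)
last_line = '-- 3 Kbytes in 2 Requests.'

raw = last_line.split(' ')
emails, size = raw[4], raw[1]   # same fixed positions used in _get_data()
print(emails, size)             # 2 3
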
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
deleted file mode 100644
index 0522b1938..000000000
--- a/python.d/postgres.chart.py
+++ /dev/null
@@ -1,666 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: postgres netdata python.d module
-# Authors: facetoe, dangtranhoang
-
-from copy import deepcopy
-
-try:
- import psycopg2
- from psycopg2 import extensions
- from psycopg2.extras import DictCursor
- from psycopg2 import OperationalError
- PSYCOPG2 = True
-except ImportError:
- PSYCOPG2 = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
-
-METRICS = dict(
- DATABASE=['connections',
- 'xact_commit',
- 'xact_rollback',
- 'blks_read',
- 'blks_hit',
- 'tup_returned',
- 'tup_fetched',
- 'tup_inserted',
- 'tup_updated',
- 'tup_deleted',
- 'conflicts',
- 'temp_files',
- 'temp_bytes',
- 'size'],
- BACKENDS=['backends_active',
- 'backends_idle'],
- INDEX_STATS=['index_count',
- 'index_size'],
- TABLE_STATS=['table_size',
- 'table_count'],
- WAL=['written_wal',
- 'recycled_wal',
- 'total_wal'],
- WAL_WRITES=['wal_writes'],
- ARCHIVE=['ready_count',
- 'done_count',
- 'file_count'],
- BGWRITER=['checkpoint_scheduled',
- 'checkpoint_requested',
- 'buffers_checkpoint',
- 'buffers_clean',
- 'maxwritten_clean',
- 'buffers_backend',
- 'buffers_alloc',
- 'buffers_backend_fsync'],
- LOCKS=['ExclusiveLock',
- 'RowShareLock',
- 'SIReadLock',
- 'ShareUpdateExclusiveLock',
- 'AccessExclusiveLock',
- 'AccessShareLock',
- 'ShareRowExclusiveLock',
- 'ShareLock',
- 'RowExclusiveLock'],
- AUTOVACUUM=['analyze',
- 'vacuum_analyze',
- 'vacuum',
- 'vacuum_freeze',
- 'brin_summarize'],
- STANDBY_DELTA=['sent_delta',
- 'write_delta',
- 'flush_delta',
- 'replay_delta'],
- REPSLOT_FILES=['replslot_wal_keep',
- 'replslot_files']
-
-)
-
-QUERIES = dict(
- WAL="""
-SELECT
- count(*) as total_wal,
- count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
- count(*) FILTER (WHERE type = 'written') AS written_wal
-FROM
- (SELECT wal.name,
- pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ),
- CASE WHEN wal.name > pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ) THEN 'recycled'
- ELSE 'written'
- END AS type
- FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
- WHERE name ~ '^[0-9A-F]{{24}}$'
- ORDER BY (pg_stat_file('pg_{0}/'||name)).modification, wal.name DESC) sub;
-""",
- ARCHIVE="""
-SELECT
- CAST(COUNT(*) AS INT) AS file_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
-FROM
- pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
-""",
- BACKENDS="""
-SELECT
- count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
- (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
-FROM pg_stat_activity;
-""",
- TABLE_STATS="""
-SELECT
- ((sum(relpages) * 8) * 1024) AS table_size,
- count(1) AS table_count
-FROM pg_class
-WHERE relkind IN ('r', 't');
-""",
- INDEX_STATS="""
-SELECT
- ((sum(relpages) * 8) * 1024) AS index_size,
- count(1) AS index_count
-FROM pg_class
-WHERE relkind = 'i';""",
- DATABASE="""
-SELECT
- datname AS database_name,
- numbackends AS connections,
- xact_commit AS xact_commit,
- xact_rollback AS xact_rollback,
- blks_read AS blks_read,
- blks_hit AS blks_hit,
- tup_returned AS tup_returned,
- tup_fetched AS tup_fetched,
- tup_inserted AS tup_inserted,
- tup_updated AS tup_updated,
- tup_deleted AS tup_deleted,
- conflicts AS conflicts,
- pg_database_size(datname) AS size,
- temp_files AS temp_files,
- temp_bytes AS temp_bytes
-FROM pg_stat_database
-WHERE datname IN %(databases)s
-;
-""",
- BGWRITER="""
-SELECT
- checkpoints_timed AS checkpoint_scheduled,
- checkpoints_req AS checkpoint_requested,
- buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
- buffers_clean * current_setting('block_size')::numeric buffers_clean,
- maxwritten_clean,
- buffers_backend * current_setting('block_size')::numeric buffers_backend,
- buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
- buffers_backend_fsync
-FROM pg_stat_bgwriter;
-""",
- LOCKS="""
-SELECT
- pg_database.datname as database_name,
- mode,
- count(mode) AS locks_count
-FROM pg_locks
- INNER JOIN pg_database ON pg_database.oid = pg_locks.database
-GROUP BY datname, mode
-ORDER BY datname, mode;
-""",
- FIND_DATABASES="""
-SELECT datname
-FROM pg_stat_database
-WHERE has_database_privilege((SELECT current_user), datname, 'connect')
-AND NOT datname ~* '^template\d+';
-""",
- FIND_STANDBY="""
-SELECT application_name
-FROM pg_stat_replication
-WHERE application_name IS NOT NULL
-GROUP BY application_name;
-""",
- FIND_REPLICATION_SLOT="""
-SELECT slot_name
-FROM pg_replication_slots;
-""",
- STANDBY_DELTA="""
-SELECT application_name,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , sent_{1}) AS sent_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , write_{1}) AS write_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , flush_{1}) AS flush_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , replay_{1}) AS replay_delta
-FROM pg_stat_replication
-WHERE application_name IS NOT NULL;
-""",
- REPSLOT_FILES="""
-WITH wal_size AS (
- SELECT current_setting('wal_block_size')::INT * setting::INT AS val
- FROM pg_settings
- WHERE name = 'wal_segment_size'
-)
-SELECT slot_name, slot_type, replslot_wal_keep, count(slot_file) AS replslot_files
-FROM (
- SELECT slot.slot_name, CASE WHEN slot_file <> 'state' THEN 1 END AS slot_file , slot_type,
- COALESCE (floor((pg_wal_lsn_diff (pg_current_wal_lsn (),
- slot.restart_lsn) - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)),
- 0) AS replslot_wal_keep
- FROM pg_replication_slots slot
- LEFT JOIN (
- SELECT slot2.slot_name,
- pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
- FROM pg_replication_slots slot2
- ) files (slot_name, slot_file)
- ON slot.slot_name = files.slot_name
- CROSS JOIN wal_size s) AS d
-GROUP BY slot_name, slot_type, replslot_wal_keep;
-""",
- IF_SUPERUSER="""
-SELECT current_setting('is_superuser') = 'on' AS is_superuser;
-""",
- DETECT_SERVER_VERSION="""
-SHOW server_version_num;
-""",
- AUTOVACUUM="""
-SELECT
- count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
- AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
- AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
- count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
-FROM pg_stat_activity
-WHERE query NOT LIKE '%%pg_stat_activity%%';
-""",
- DIFF_LSN="""
-SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ;
-"""
-
-)
-
-
-QUERY_STATS = {
- QUERIES['DATABASE']: METRICS['DATABASE'],
- QUERIES['BACKENDS']: METRICS['BACKENDS'],
- QUERIES['LOCKS']: METRICS['LOCKS']
-}
-
-ORDER = ['db_stat_temp_files', 'db_stat_temp_bytes', 'db_stat_blks', 'db_stat_tuple_returned', 'db_stat_tuple_write',
-         'db_stat_transactions', 'db_stat_connections', 'database_size', 'backend_process', 'index_count', 'index_size',
-         'table_count', 'table_size', 'wal', 'wal_writes', 'archive_wal', 'checkpointer', 'stat_bgwriter_alloc',
-         'stat_bgwriter_checkpoint', 'stat_bgwriter_backend', 'stat_bgwriter_backend_fsync', 'stat_bgwriter_bgwriter',
-         'stat_bgwriter_maxwritten', 'replication_slot', 'standby_delta', 'autovacuum']
-
-CHARTS = {
- 'db_stat_transactions': {
- 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions',
- 'line'],
- 'lines': [
- ['xact_commit', 'committed', 'incremental'],
- ['xact_rollback', 'rolled back', 'incremental']
- ]},
- 'db_stat_connections': {
- 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
- 'line'],
- 'lines': [
- ['connections', 'connections', 'absolute']
- ]},
- 'db_stat_blks': {
-        'options': [None, 'Disk blocks read from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
- 'lines': [
- ['blks_read', 'disk', 'incremental'],
- ['blks_hit', 'cache', 'incremental']
- ]},
- 'db_stat_tuple_returned': {
- 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
- 'line'],
- 'lines': [
- ['tup_returned', 'sequential', 'incremental'],
- ['tup_fetched', 'bitmap', 'incremental']
- ]},
- 'db_stat_tuple_write': {
- 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
- 'lines': [
- ['tup_inserted', 'inserted', 'incremental'],
- ['tup_updated', 'updated', 'incremental'],
- ['tup_deleted', 'deleted', 'incremental'],
- ['conflicts', 'conflicts', 'incremental']
- ]},
- 'db_stat_temp_bytes': {
- 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'],
- 'lines': [
- ['temp_bytes', 'size', 'incremental', 1, 1024]
- ]},
- 'db_stat_temp_files': {
- 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', 'line'],
- 'lines': [
- ['temp_files', 'files', 'incremental']
- ]},
- 'database_size': {
- 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
- 'lines': [
- ]},
- 'backend_process': {
- 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
- 'line'],
- 'lines': [
- ['backends_active', 'active', 'absolute'],
- ['backends_idle', 'idle', 'absolute']
- ]},
- 'index_count': {
- 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
- 'lines': [
- ['index_count', 'total', 'absolute']
- ]},
- 'index_size': {
- 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
- 'lines': [
- ['index_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
- 'table_count': {
- 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
- 'lines': [
- ['table_count', 'total', 'absolute']
- ]},
- 'table_size': {
- 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
- 'lines': [
- ['table_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
- 'wal': {
- 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
- 'lines': [
- ['written_wal', 'written', 'absolute'],
- ['recycled_wal', 'recycled', 'absolute'],
- ['total_wal', 'total', 'absolute']
- ]},
- 'wal_writes': {
- 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
- 'lines': [
- ['wal_writes', 'writes', 'incremental', 1, 1024]
- ]},
- 'archive_wal': {
- 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
- 'lines': [
- ['file_count', 'total', 'incremental'],
- ['ready_count', 'ready', 'incremental'],
- ['done_count', 'done', 'incremental']
- ]},
- 'checkpointer': {
- 'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
- 'lines': [
- ['checkpoint_scheduled', 'scheduled', 'incremental'],
- ['checkpoint_requested', 'requested', 'incremental']
- ]},
- 'stat_bgwriter_alloc': {
- 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
- 'lines': [
- ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
- ]},
- 'stat_bgwriter_checkpoint': {
- 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'],
- 'lines': [
- ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
- ]},
- 'stat_bgwriter_backend': {
- 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'],
- 'lines': [
- ['buffers_backend', 'backend', 'incremental', 1, 1024]
- ]},
- 'stat_bgwriter_backend_fsync': {
- 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
- 'lines': [
- ['buffers_backend_fsync', 'backend fsync', 'incremental']
- ]},
- 'stat_bgwriter_bgwriter': {
- 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'],
- 'lines': [
- ['buffers_clean', 'clean', 'incremental', 1, 1024]
- ]},
- 'stat_bgwriter_maxwritten': {
- 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', 'line'],
- 'lines': [
- ['maxwritten_clean', 'maxwritten', 'incremental']
- ]},
- 'autovacuum': {
- 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
- 'lines': [
- ['analyze', 'analyze', 'absolute'],
- ['vacuum', 'vacuum', 'absolute'],
- ['vacuum_analyze', 'vacuum analyze', 'absolute'],
- ['vacuum_freeze', 'vacuum freeze', 'absolute'],
- ['brin_summarize', 'brin summarize', 'absolute']
- ]},
- 'standby_delta': {
- 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
- 'lines': [
- ['sent_delta', 'sent delta', 'absolute', 1, 1024],
- ['write_delta', 'write delta', 'absolute', 1, 1024],
- ['flush_delta', 'flush delta', 'absolute', 1, 1024],
- ['replay_delta', 'replay delta', 'absolute', 1, 1024]
- ]},
- 'replication_slot': {
- 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
- 'lines': [
-            ['replslot_wal_keep', 'wal kept', 'absolute'],
- ['replslot_files', 'pg_replslot files', 'absolute']
- ]}
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER[:]
- self.definitions = deepcopy(CHARTS)
- self.table_stats = configuration.pop('table_stats', False)
- self.index_stats = configuration.pop('index_stats', False)
- self.database_poll = configuration.pop('database_poll', None)
- self.configuration = configuration
- self.connection = False
- self.server_version = None
- self.data = dict()
- self.locks_zeroed = dict()
- self.databases = list()
- self.secondaries = list()
- self.replication_slots = list()
- self.queries = QUERY_STATS.copy()
-
- def _connect(self):
- params = dict(user='postgres',
- database=None,
- password=None,
- host=None,
- port=5432)
- params.update(self.configuration)
-
- if not self.connection:
- try:
- self.connection = psycopg2.connect(**params)
- self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- self.connection.set_session(readonly=True)
- except OperationalError as error:
- return False, str(error)
- return True, True
-
- def check(self):
- if not PSYCOPG2:
- self.error('\'python-psycopg2\' module is needed to use postgres.chart.py')
- return False
- result, error = self._connect()
- if not result:
-            conf = dict((k, v if k != 'password' else '*****')
-                        for k, v in self.configuration.items())
- self.error('Failed to connect to %s. Error: %s' % (str(conf), error))
- return False
- try:
- cursor = self.connection.cursor()
- self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
- is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
- self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY'])
- self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION'])
-            if self.server_version >= 90400:
- self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT'])
- cursor.close()
-
- if self.database_poll and isinstance(self.database_poll, str):
- self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()]\
- or self.databases
-
- self.locks_zeroed = populate_lock_types(self.databases)
- self.add_additional_queries_(is_superuser)
- self.create_dynamic_charts_()
- return True
- except Exception as error:
- self.error(str(error))
- return False
-
- def add_additional_queries_(self, is_superuser):
-
- if self.server_version >= 100000:
- wal = 'wal'
- lsn = 'lsn'
- else:
- wal = 'xlog'
- lsn = 'location'
- self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
-        self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
-        self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
-
- if self.index_stats:
- self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
- if self.table_stats:
- self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
- if is_superuser:
- self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
- if self.server_version >= 90400:
-            self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
- if self.server_version >= 100000:
- self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
- if self.server_version >= 90400:
- self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM']
-
- def create_dynamic_charts_(self):
-
- for database_name in self.databases[::-1]:
- self.definitions['database_size']['lines'].append([database_name + '_size',
- database_name, 'absolute', 1, 1024 * 1024])
- for chart_name in [name for name in self.order if name.startswith('db_stat')]:
- add_database_stat_chart_(order=self.order, definitions=self.definitions,
- name=chart_name, database_name=database_name)
-
- add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
-
- for application_name in self.secondaries[::-1]:
- add_replication_delta_chart_(order=self.order, definitions=self.definitions,
- name='standby_delta', application_name=application_name)
-
- for slot_name in self.replication_slots[::-1]:
- add_replication_slot_chart_(order=self.order, definitions=self.definitions,
- name='replication_slot', slot_name=slot_name)
-
-
-
- def _get_data(self):
- result, error = self._connect()
- if result:
- cursor = self.connection.cursor(cursor_factory=DictCursor)
- try:
- self.data.update(self.locks_zeroed)
- for query, metrics in self.queries.items():
- self.query_stats_(cursor, query, metrics)
-
- except OperationalError:
- self.connection = False
- cursor.close()
- return None
- else:
- cursor.close()
- return self.data
- else:
- return None
-
- def query_stats_(self, cursor, query, metrics):
- cursor.execute(query, dict(databases=tuple(self.databases)))
- for row in cursor:
- for metric in metrics:
- if 'database_name' in row:
- dimension_id = '_'.join([row['database_name'], metric])
- elif 'application_name' in row:
- dimension_id = '_'.join([row['application_name'], metric])
- elif 'slot_name' in row:
- dimension_id = '_'.join([row['slot_name'], metric])
- else:
- dimension_id = metric
- if metric in row:
- self.data[dimension_id] = int(row[metric])
- elif 'locks_count' in row:
- self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
-
-
-def discover_databases_(cursor, query):
- cursor.execute(query)
- result = list()
- for db in [database[0] for database in cursor]:
- if db not in result:
- result.append(db)
- return result
-
-def discover_secondaries_(cursor, query):
- cursor.execute(query)
- result = list()
- for sc in [standby[0] for standby in cursor]:
- if sc not in result:
- result.append(sc)
- return result
-
-def discover_replication_slots_(cursor, query):
- cursor.execute(query)
- result = list()
- for slot in [replication_slot[0] for replication_slot in cursor]:
- if slot not in result:
- result.append(slot)
- return result
-
-def check_if_superuser_(cursor, query):
- cursor.execute(query)
- return cursor.fetchone()[0]
-
-def detect_server_version(cursor, query):
- cursor.execute(query)
- return int(cursor.fetchone()[0])
-
-def populate_lock_types(databases):
- result = dict()
- for database in databases:
- for lock_type in METRICS['LOCKS']:
- key = '_'.join([database, lock_type])
- result[key] = 0
-
- return result
-
-
-def add_database_lock_chart_(order, definitions, database_name):
- def create_lines(database):
- result = list()
- for lock_type in METRICS['LOCKS']:
- dimension_id = '_'.join([database, lock_type])
- result.append([dimension_id, lock_type, 'absolute'])
- return result
-
- chart_name = database_name + '_locks'
- order.insert(-1, chart_name)
- definitions[chart_name] = {
- 'options':
- [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'],
- 'lines': create_lines(database_name)
- }
-
-
-def add_database_stat_chart_(order, definitions, name, database_name):
- def create_lines(database, lines):
- result = list()
- for line in lines:
- new_line = ['_'.join([database, line[0]])] + line[1:]
- result.append(new_line)
- return result
-
- chart_template = CHARTS[name]
- chart_name = '_'.join([database_name, name])
- order.insert(0, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
- definitions[chart_name] = {
- 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
- 'lines': create_lines(database_name, chart_template['lines'])}
-
-def add_replication_delta_chart_(order, definitions, name, application_name):
- def create_lines(standby, lines):
- result = list()
- for line in lines:
- new_line = ['_'.join([standby, line[0]])] + line[1:]
- result.append(new_line)
- return result
-
- chart_template = CHARTS[name]
- chart_name = '_'.join([application_name, name])
- position = order.index('database_size')
- order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
- definitions[chart_name] = {
- 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
- 'lines': create_lines(application_name, chart_template['lines'])}
-
-def add_replication_slot_chart_(order, definitions, name, slot_name):
- def create_lines(slot, lines):
- result = list()
- for line in lines:
- new_line = ['_'.join([slot, line[0]])] + line[1:]
- result.append(new_line)
- return result
-
- chart_template = CHARTS[name]
- chart_name = '_'.join([slot_name, name])
- position = order.index('database_size')
- order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
- definitions[chart_name] = {
- 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
- 'lines': create_lines(slot_name, chart_template['lines'])}
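
The version gates above (90400, 100000) compare against PostgreSQL's `server_version_num`, an integer encoding of the server version. A small reference sketch of that encoding:

    # Sketch: how server_version_num (returned by SHOW server_version_num) is built.
    # Pre-10 releases: major*10000 + minor*100 + patch; 10 and later: major*10000 + patch.
    def version_num(major, minor=0, patch=0):
        if major >= 10:
            return major * 10000 + patch
        return major * 10000 + minor * 100 + patch

    assert version_num(9, 4) == 90400     # 9.4: replication slots, FILTER clauses
    assert version_num(10) == 100000      # 10: pg_wal / *_lsn function names
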
diff --git a/python.d/powerdns.chart.py b/python.d/powerdns.chart.py
deleted file mode 100644
index a8d2f399c..000000000
--- a/python.d/powerdns.chart.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: powerdns netdata python.d module
-# Author: l2isbad
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-priority = 60000
-retries = 60
-# update_every = 3
-
-ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
-CHARTS = {
- 'questions': {
- 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
- 'lines': [
- ['udp-queries', None, 'incremental'],
- ['udp-answers', None, 'incremental'],
- ['tcp-queries', None, 'incremental'],
- ['tcp-answers', None, 'incremental']
- ]},
- 'cache_usage': {
- 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
- 'lines': [
- ['query-cache-hit', None, 'incremental'],
- ['query-cache-miss', None, 'incremental'],
- ['packetcache-hit', 'packet-cache-hit', 'incremental'],
- ['packetcache-miss', 'packet-cache-miss', 'incremental']
- ]},
- 'cache_size': {
- 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
- 'lines': [
- ['query-cache-size', None, 'absolute'],
- ['packetcache-size', 'packet-cache-size', 'absolute'],
- ['key-cache-size', None, 'absolute'],
- ['meta-cache-size', None, 'absolute']
- ]},
- 'latency': {
- 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
- 'lines': [
- ['latency', None, 'absolute']
- ]}
-
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
- return dict((d['name'], d['value']) for d in loads(data))
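
_get_data() above assumes the PowerDNS REST API statistics endpoint, which answers with a JSON array of {"name": ..., "value": ...} objects. A minimal sketch of the same flattening, using a fabricated payload:

    # Sketch of the name/value flattening done in powerdns.chart.py above
    # (the sample payload is made up; real data comes from the statistics endpoint).
    from json import loads

    sample = '[{"name": "udp-queries", "value": "1234"}, {"name": "latency", "value": "15"}]'
    data = dict((d['name'], d['value']) for d in loads(sample))
    print(data)  # {'udp-queries': '1234', 'latency': '15'}
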
diff --git a/python.d/python-modules-installer.sh b/python.d/python-modules-installer.sh
deleted file mode 100644
index cda3c6662..000000000
--- a/python.d/python-modules-installer.sh
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env bash
-
-umask 022
-
-dir="/usr/local/libexec/netdata/python.d"
-target="${dir}/python_modules"
-pv="$(python -V 2>&1)"
-
-# parse parameters
-while [ ! -z "${1}" ]
-do
- case "${1}" in
- -p|--python)
- pv="Python ${2}"
- shift 2
- ;;
-
- -d|--dir)
- dir="${2}"
- target="${dir}/python_modules"
- echo >&2 "Will install python modules in: '${target}'"
- shift 2
- ;;
-
- -s|--system)
- target=
- echo >&2 "Will install python modules system-wide"
- shift
- ;;
-
- -h|--help)
- echo "${0} [--dir netdata-python.d-path] [--system]"
- echo "Please make sure you have installed packages: python-pip (or python3-pip) python-dev libyaml-dev libmysqlclient-dev"
- exit 0
- ;;
-
- *)
- echo >&2 "Cannot understand parameter: ${1}"
- exit 1
- ;;
- esac
-done
-
-
-if [ ! -z "${target}" -a ! -d "${target}" ]
-then
- echo >&2 "Cannot find directory: '${target}'"
- exit 1
-fi
-
-if [[ "${pv}" =~ ^Python\ 2.* ]]
-then
- pv=2
- pip="$(which pip2 2>/dev/null)"
-elif [[ "${pv}" =~ ^Python\ 3.* ]]
-then
- pv=3
- pip="$(which pip3 2>/dev/null)"
-else
- echo >&2 "Cannot detect python version. Is python installed?"
- exit 1
-fi
-
-[ -z "${pip}" ] && pip="$(which pip 2>/dev/null)"
-if [ -z "${pip}" ]
-then
- echo >&2 "pip command is required to install python v${pv} modules."
- [ "${pv}" = "2" ] && echo >&2 "Please install python-pip."
- [ "${pv}" = "3" ] && echo >&2 "Please install python3-pip."
- exit 1
-fi
-
-echo >&2 "Working for python version ${pv} (pip command: '${pip}')"
-echo >&2 "Installing netdata python modules in: '${target}'"
-
-run() {
- printf "Running command:\n# "
- printf "%q " "${@}"
- printf "\n"
- "${@}"
-}
-
-# try to install all the python modules given as parameters
-# until the first that succeeds
-failed=""
-installed=""
-errors=0
-pip_install() {
- local ret x msg="${1}"
- shift
-
- echo >&2
- echo >&2
- echo >&2 "Installing one of: ${*}"
-
- for x in "${@}"
- do
- echo >&2
- echo >&2 "attempting to install: ${x}"
- if [ ! -z "${target}" ]
- then
- run "${pip}" install --target "${target}" "${x}"
- ret=$?
- else
- run "${pip}" install "${x}"
- ret=$?
- fi
- [ ${ret} -eq 0 ] && break
- echo >&2 "failed to install: ${x}. ${msg}"
- done
-
- if [ ${ret} -ne 0 ]
- then
- echo >&2
- echo >&2
- echo >&2 "FAILED: could not install any of: ${*}. ${msg}"
- echo >&2
- echo >&2
- errors=$(( errors + 1 ))
- failed="${failed}|${*}"
- else
- echo >&2
- echo >&2
- echo >&2 "SUCCESS: we have: ${x}"
- echo >&2
- echo >&2
- installed="${installed} ${x}"
- fi
- return ${ret}
-}
-
-if [ "${pv}" = "2" ]
-then
- pip_install "is libyaml-dev and python-dev installed?" pyyaml
- pip_install "is libmysqlclient-dev and python-dev installed?" mysqlclient mysql-python pymysql
-else
- pip_install "is libyaml-dev and python-dev installed?" pyyaml
- pip_install "is libmysqlclient-dev and python-dev installed?" mysql-python mysqlclient pymysql
-fi
-
-echo >&2
-echo >&2
-if [ ${errors} -ne 0 ]
-then
- echo >&2 "Failed to install ${errors} modules: ${failed}"
- if [ ! -z "${target}" ]
- then
- echo >&2
- echo >&2 "If you are getting errors during cleanup from pip, there is a known bug"
- echo >&2 "in certain versions of pip that prevents installing packages local to an"
- echo >&2 "application. To install them system-wide please run:"
- echo >&2 "$0 --system"
- fi
- exit 1
-else
- echo >&2 "All done. We have: ${installed}"
- exit 0
-fi
diff --git a/python.d/python_modules/__init__.py b/python.d/python_modules/__init__.py
deleted file mode 100644
index 8d1c8b69c..000000000
--- a/python.d/python_modules/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
deleted file mode 100644
index 7c6e1d2f2..000000000
--- a/python.d/python_modules/base.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: backward compatibility with old version
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.FrameworkServices.UrlService import UrlService
-from bases.FrameworkServices.SocketService import SocketService
-from bases.FrameworkServices.LogService import LogService
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.FrameworkServices.MySQLService import MySQLService
diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/python.d/python_modules/bases/FrameworkServices/ExecutableService.py
deleted file mode 100644
index a71f2bfd2..000000000
--- a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-
-import os
-
-from subprocess import Popen, PIPE
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-
-class ExecutableService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.command = None
-
- def _get_raw_data(self, stderr=False):
- """
- Get raw data from executed command
- :return: <list>
- """
- try:
- p = Popen(self.command, stdout=PIPE, stderr=PIPE)
- except Exception as error:
- self.error('Executing command {command} resulted in error: {error}'.format(command=self.command,
- error=error))
- return None
- data = list()
- std = p.stderr if stderr else p.stdout
- for line in std:
- try:
- data.append(line.decode('utf-8'))
- except TypeError:
- continue
-
- return data or None
-
- def check(self):
- """
- Parse basic configuration, check if command is whitelisted and is returning values
- :return: <boolean>
- """
- # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
- if 'command' in self.configuration:
- self.command = self.configuration['command']
-
- # "command" must be: 1.not None 2. type <str>
- if not (self.command and isinstance(self.command, str)):
- self.error('Command is not defined or command type is not <str>')
- return False
-
- # Split "command" into: 1. command <str> 2. options <list>
- command, opts = self.command.split()[0], self.command.split()[1:]
-
- # Check for "bad" symbols in options. No pipes, redirects etc.
- opts_list = ['&', '|', ';', '>', '<']
- bad_opts = set(''.join(opts)) & set(opts_list)
- if bad_opts:
- self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
- return False
-
- # Find absolute path ('echo' => '/bin/echo')
- if '/' not in command:
- command = find_binary(command)
- if not command:
- self.error('Can\'t locate "{command}" binary'.format(command=self.command))
- return False
- # Check if binary exist and executable
- else:
- if not os.access(command, os.X_OK):
- self.error('"{binary}" is not executable'.format(binary=command))
- return False
-
- self.command = [command] + opts if opts else [command]
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
- error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error('Command "{command}" returned no data'.format(command=self.command))
- return False
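
To show how ExecutableService is consumed, here is a minimal hypothetical module: set self.command, let check() resolve and validate the binary, and implement _get_data() on top of _get_raw_data(). All names below (the `uptime` command, chart ids) are illustrative, and the `bases.*` import assumes the python.d plugin environment:

    # Hypothetical ExecutableService-based module (sketch, not shipped with netdata).
    from bases.FrameworkServices.ExecutableService import ExecutableService

    ORDER = ['load1']
    CHARTS = {
        'load1': {
            'options': [None, 'Load average (1 min)', 'load', 'load', 'example.load1', 'line'],
            'lines': [
                ['load1', None, 'absolute', 1, 100]
            ]}
    }

    class Service(ExecutableService):
        def __init__(self, configuration=None, name=None):
            ExecutableService.__init__(self, configuration=configuration, name=name)
            self.command = 'uptime'          # check() resolves this to an absolute path
            self.order = ORDER
            self.definitions = CHARTS

        def _get_data(self):
            raw = self._get_raw_data()
            if not raw:
                return None
            try:
                # "... load average: 0.52, 0.58, 0.59" -> 52 (stored x100, divisor 100 on the chart)
                load1 = raw[-1].split('load average:')[1].split(',')[0]
                return {'load1': int(float(load1) * 100)}
            except (IndexError, ValueError):
                return None
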
diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/python.d/python_modules/bases/FrameworkServices/LogService.py
deleted file mode 100644
index 45daa2446..000000000
--- a/python.d/python_modules/bases/FrameworkServices/LogService.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-
-from glob import glob
-import os
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-class LogService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.log_path = self.configuration.get('path')
- self.__glob_path = self.log_path
- self._last_position = 0
- self.__re_find = dict(current=0, run=0, maximum=60)
-
- def _get_raw_data(self):
- """
- Get log lines since last poll
- :return: list
- """
- lines = list()
- try:
- if self.__re_find['current'] == self.__re_find['run']:
- self._find_recent_log_file()
- size = os.path.getsize(self.log_path)
- if size == self._last_position:
- self.__re_find['current'] += 1
- return list() # return empty list if nothing has changed
- elif size < self._last_position:
- self._last_position = 0 # read from beginning if file has shrunk
-
- with open(self.log_path) as fp:
- fp.seek(self._last_position)
- for line in fp:
- lines.append(line)
- self._last_position = fp.tell()
- self.__re_find['current'] = 0
- except (OSError, IOError) as error:
- self.__re_find['current'] += 1
- self.error(str(error))
-
- return lines or None
-
- def _find_recent_log_file(self):
- """
- :return:
- """
- self.__re_find['run'] = self.__re_find['maximum']
- self.__re_find['current'] = 0
- self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
- path_list = glob(self.__glob_path)
- if path_list:
- self.log_path = max(path_list)
- return True
- return False
-
- def check(self):
- """
- Parse basic configuration and check if log file exists
- :return: boolean
- """
- if not self.log_path:
- self.error('No path to log specified')
-            return False
-
- if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
- return True
- self.error('Cannot access {0}'.format(self.log_path))
- return False
-
- def create(self):
- # set cursor at last byte of log file
- self._last_position = os.path.getsize(self.log_path)
- status = SimpleService.create(self)
- return status
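
A hypothetical LogService consumer only needs a log_path (or a glob) and a _get_data() that digests the new lines returned by _get_raw_data() since the previous poll. Sketch with an illustrative path and pattern; assumes the python.d plugin environment:

    # Hypothetical LogService-based module (sketch).
    import re

    from bases.FrameworkServices.LogService import LogService

    ORDER = ['errors']
    CHARTS = {
        'errors': {
            'options': [None, 'Errors in log', 'events/s', 'errors', 'example.errors', 'line'],
            'lines': [
                ['errors', None, 'incremental']
            ]}
    }

    RE_ERROR = re.compile(r'\berror\b', re.IGNORECASE)

    class Service(LogService):
        def __init__(self, configuration=None, name=None):
            LogService.__init__(self, configuration=configuration, name=name)
            self.order = ORDER
            self.definitions = CHARTS
            self.log_path = self.configuration.get('path', '/var/log/example.log')
            self.errors = 0

        def _get_data(self):
            lines = self._get_raw_data()
            if lines is None:
                return None
            # keep a running total; the 'incremental' algorithm converts it to a rate
            self.errors += sum(1 for line in lines if RE_ERROR.search(line))
            return {'errors': self.errors}
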
diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py b/python.d/python_modules/bases/FrameworkServices/MySQLService.py
deleted file mode 100644
index 3acc5b109..000000000
--- a/python.d/python_modules/bases/FrameworkServices/MySQLService.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (l2isbad)
-
-from sys import exc_info
-
-try:
- import MySQLdb
-
- PY_MYSQL = True
-except ImportError:
- try:
- import pymysql as MySQLdb
-
- PY_MYSQL = True
- except ImportError:
- PY_MYSQL = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-class MySQLService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.__connection = None
- self.__conn_properties = dict()
- self.extra_conn_properties = dict()
- self.__queries = self.configuration.get('queries', dict())
- self.queries = dict()
-
- def __connect(self):
- try:
- connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
- except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
- return None, str(error)
- else:
- return connection, None
-
- def check(self):
- def get_connection_properties(conf, extra_conf):
- properties = dict()
- if conf.get('user'):
- properties['user'] = conf['user']
- if conf.get('pass'):
- properties['passwd'] = conf['pass']
- if conf.get('socket'):
- properties['unix_socket'] = conf['socket']
- elif conf.get('host'):
- properties['host'] = conf['host']
- properties['port'] = int(conf.get('port', 3306))
- elif conf.get('my.cnf'):
- if MySQLdb.__name__ == 'pymysql':
- self.error('"my.cnf" parsing is not working for pymysql')
- else:
- properties['read_default_file'] = conf['my.cnf']
- if isinstance(extra_conf, dict) and extra_conf:
- properties.update(extra_conf)
-
- return properties or None
-
- def is_valid_queries_dict(raw_queries, log_error):
- """
- :param raw_queries: dict:
- :param log_error: function:
- :return: dict or None
-
- raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries)
- """
-
- def is_valid_query(query):
- return all([isinstance(query, str),
- query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
-
- if hasattr(raw_queries, 'keys') and raw_queries:
- valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
- bad_queries = set(raw_queries) - set(valid_queries)
-
- if bad_queries:
- log_error('Removed query(s): {queries}'.format(queries=bad_queries))
- return valid_queries
- else:
- log_error('Unsupported "queries" format. Must be not empty <dict>')
- return None
-
- if not PY_MYSQL:
- self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
- return False
-
- # Preference: 1. "queries" from the configuration file 2. "queries" from the module
- self.queries = self.__queries or self.queries
- # Check if "self.queries" exist, not empty and all queries are in valid format
- self.queries = is_valid_queries_dict(self.queries, self.error)
- if not self.queries:
-            return False
-
- # Get connection properties
- self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
- if not self.__conn_properties:
- self.error('Connection properties are missing')
- return False
-
- # Create connection to the database
- self.__connection, error = self.__connect()
- if error:
- self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
- return False
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Error: {error}'.format(error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error("_get_data() returned no data or type is not <dict>")
- return False
-
- def _get_raw_data(self, description=None):
- """
- Get raw data from MySQL server
- :return: dict: fetchall() or (fetchall(), description)
- """
-
- if not self.__connection:
- self.__connection, error = self.__connect()
- if error:
- return None
-
- raw_data = dict()
- queries = dict(self.queries)
- try:
- with self.__connection as cursor:
- for name, query in queries.items():
- try:
- cursor.execute(query)
- except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
- if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
- raise RuntimeError
-                        self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
- query=query,
- error=error))
- self.queries.pop(name)
- continue
- else:
- raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
- self.__connection.commit()
- except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
- self.__connection.close()
- self.__connection = None
- return None
- else:
- return raw_data or None
-
- @staticmethod
- def __is_error_critical(err_class, err_text):
- return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
- 'Unknown column' not in err_text])
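
A hypothetical MySQLService consumer supplies self.queries (name to SELECT/SHOW statement) plus chart definitions; _get_raw_data() then yields {name: fetchall()}. Sketch with an illustrative query name; assumes the python.d plugin environment:

    # Hypothetical MySQLService-based module (sketch).
    from bases.FrameworkServices.MySQLService import MySQLService

    ORDER = ['threads']
    CHARTS = {
        'threads': {
            'options': [None, 'MySQL connected threads', 'threads', 'connections', 'example.threads', 'line'],
            'lines': [
                ['threads_connected', 'connected', 'absolute']
            ]}
    }

    class Service(MySQLService):
        def __init__(self, configuration=None, name=None):
            MySQLService.__init__(self, configuration=configuration, name=name)
            self.order = ORDER
            self.definitions = CHARTS
            # used by check() when the job configuration provides no "queries" of its own
            self.queries = dict(threads="SHOW GLOBAL STATUS LIKE 'Threads_connected'")

        def _get_data(self):
            raw = self._get_raw_data()
            if not raw or 'threads' not in raw:
                return None
            # fetchall() rows look like (('Threads_connected', '3'),)
            return {'threads_connected': int(raw['threads'][0][1])}
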
diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/python.d/python_modules/bases/FrameworkServices/SimpleService.py
deleted file mode 100644
index 177332c1f..000000000
--- a/python.d/python_modules/bases/FrameworkServices/SimpleService.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-
-from threading import Thread
-
-try:
- from time import sleep, monotonic as time
-except ImportError:
- from time import sleep, time
-
-from bases.charts import Charts, ChartError, create_runtime_chart
-from bases.collection import OldVersionCompatibility, safe_print
-from bases.loggers import PythonDLimitedLogger
-
-RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
- 'SET run_time = {elapsed}\n' \
- 'END\n'
-
-
-class RuntimeCounters:
- def __init__(self, configuration):
- """
- :param configuration: <dict>
- """
- self.FREQ = int(configuration.pop('update_every'))
- self.START_RUN = 0
- self.NEXT_RUN = 0
- self.PREV_UPDATE = 0
- self.SINCE_UPDATE = 0
- self.ELAPSED = 0
- self.RETRIES = 0
- self.RETRIES_MAX = configuration.pop('retries')
- self.PENALTY = 0
- self.RUNS = 1
-
- def is_sleep_time(self):
- return self.START_RUN < self.NEXT_RUN
-
-
-class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object):
- """
- Prototype of Service class.
- Implemented basic functionality to run jobs by `python.d.plugin`
- """
- def __init__(self, configuration, name=''):
- """
- :param configuration: <dict>
- :param name: <str>
- """
- Thread.__init__(self)
- self.daemon = True
- PythonDLimitedLogger.__init__(self)
- OldVersionCompatibility.__init__(self)
- self.configuration = configuration
- self.order = list()
- self.definitions = dict()
-
- self.module_name = self.__module__
- self.job_name = configuration.pop('job_name')
- self.override_name = configuration.pop('override_name')
- self.fake_name = None
-
- self._runtime_counters = RuntimeCounters(configuration=configuration)
- self.charts = Charts(job_name=self.actual_name,
- priority=configuration.pop('priority'),
- cleanup=configuration.pop('chart_cleanup'),
- get_update_every=self.get_update_every,
- module_name=self.module_name)
-
- def __repr__(self):
- return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
- name=self.name)
-
- @property
- def name(self):
- if self.job_name:
- return '_'.join([self.module_name, self.override_name or self.job_name])
- return self.module_name
-
- def actual_name(self):
- return self.fake_name or self.name
-
- @property
- def runs_counter(self):
- return self._runtime_counters.RUNS
-
- @property
- def update_every(self):
- return self._runtime_counters.FREQ
-
- @update_every.setter
- def update_every(self, value):
- """
- :param value: <int>
- :return:
- """
- self._runtime_counters.FREQ = value
-
- def get_update_every(self):
- return self.update_every
-
- def check(self):
- """
- check() prototype
- :return: boolean
- """
- self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
- data = self.get_data()
- if data and isinstance(data, dict):
- return True
- self.debug('returned value is wrong: {0}'.format(data))
- return False
-
- @create_runtime_chart
- def create(self):
- for chart_name in self.order:
- chart_config = self.definitions.get(chart_name)
-
- if not chart_config:
- self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
- "Skipping it.".format(chart_name=chart_name))
- continue
-
- # create chart
- chart_params = [chart_name] + chart_config['options']
- try:
- self.charts.add_chart(params=chart_params)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
- error=error))
- continue
-
- # add dimensions to chart
- for dimension in chart_config['lines']:
- try:
- self.charts[chart_name].add_dimension(dimension)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
- error=error))
- continue
-
- # add variables to chart
- if 'variables' in chart_config:
- for variable in chart_config['variables']:
- try:
- self.charts[chart_name].add_variable(variable)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
- error=error))
- continue
-
- del self.order
- del self.definitions
-
- # True if job has at least 1 chart else False
- return bool(self.charts)
-
- def run(self):
- """
- Runs job in thread. Handles retries.
- Exits when job failed or timed out.
- :return: None
- """
- job = self._runtime_counters
- self.debug('started, update frequency: {freq}, '
- 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
-
- while True:
- job.START_RUN = time()
-
- job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
-
- self.sleep_until_next_run()
-
- if job.PREV_UPDATE:
- job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6)
-
- try:
- updated = self.update(interval=job.SINCE_UPDATE)
- except Exception as error:
- self.error('update() unhandled exception: {error}'.format(error=error))
- updated = False
-
- job.RUNS += 1
-
- if not updated:
- if not self.manage_retries():
- return
- else:
- job.ELAPSED = int((time() - job.START_RUN) * 1e3)
- job.PREV_UPDATE = job.START_RUN
- job.RETRIES, job.PENALTY = 0, 0
- safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
- since_last=job.SINCE_UPDATE,
- elapsed=job.ELAPSED))
- self.debug('update => [{status}] (elapsed time: {elapsed}, '
- 'retries left: {retries})'.format(status='OK' if updated else 'FAILED',
- elapsed=job.ELAPSED if updated else '-',
- retries=job.RETRIES_MAX - job.RETRIES))
-
- def update(self, interval):
- """
- :return:
- """
- data = self.get_data()
- if not data:
- self.debug('get_data() returned no data')
- return False
- elif not isinstance(data, dict):
- self.debug('get_data() returned incorrect type data')
- return False
-
- updated = False
-
- for chart in self.charts:
- if chart.flags.obsoleted:
- if chart.can_be_updated(data):
- chart.refresh()
- else:
- continue
- elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
- chart.obsolete()
-                self.error("chart '{0}' was suppressed because it has not been updated".format(chart.name))
- continue
-
- ok = chart.update(data, interval)
- if ok:
- updated = True
-
- if not updated:
- self.debug('none of the charts has been updated')
-
- return updated
-
- def manage_retries(self):
- rc = self._runtime_counters
- rc.RETRIES += 1
- if rc.RETRIES % 5 == 0:
- rc.PENALTY = int(rc.RETRIES * self.update_every / 2)
- if rc.RETRIES >= rc.RETRIES_MAX:
- self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX))
- return False
- return True
-
- def sleep_until_next_run(self):
- job = self._runtime_counters
-
- # sleep() is interruptable
- while job.is_sleep_time():
- sleep_time = job.NEXT_RUN - job.START_RUN
- self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
- freq=job.FREQ + job.PENALTY))
- sleep(sleep_time)
- job.START_RUN = time()
-
- def get_data(self):
- return self._get_data()
-
- def _get_data(self):
- raise NotImplementedError
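
The scheduling in run() above aligns every poll to the next multiple of the update frequency and adds the retry penalty accumulated by manage_retries(). A tiny sketch of that arithmetic:

    # Sketch of the next-run alignment used in SimpleService.run() above.
    def next_run(start_run, freq, penalty=0):
        # snap to the next multiple of freq, then push it back by the current penalty
        return start_run - (start_run % freq) + freq + penalty

    assert next_run(1003, 5) == 1005              # mid-interval start -> next 5 s boundary
    assert next_run(1005, 5) == 1010              # exactly on a boundary -> the following one
    assert next_run(1003, 5, penalty=10) == 1015  # repeated failures delay the next poll
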
diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/python.d/python_modules/bases/FrameworkServices/SocketService.py
deleted file mode 100644
index 8d27ae660..000000000
--- a/python.d/python_modules/bases/FrameworkServices/SocketService.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-
-import socket
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-class SocketService(SimpleService):
- def __init__(self, configuration=None, name=None):
- self._sock = None
- self._keep_alive = False
- self.host = 'localhost'
- self.port = None
- self.unix_socket = None
- self.dgram_socket = False
- self.request = ''
- self.__socket_config = None
- self.__empty_request = "".encode()
- SimpleService.__init__(self, configuration=configuration, name=name)
-
- def _socket_error(self, message=None):
- if self.unix_socket is not None:
- self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
- message=message))
- else:
- if self.__socket_config is not None:
- af, sock_type, proto, canon_name, sa = self.__socket_config
- self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
- port=sa[1],
- message=message))
- else:
- self.error('unknown socket: {0}'.format(message))
-
- def _connect2socket(self, res=None):
- """
- Connect to a socket, passing the result of getaddrinfo()
- :return: boolean
- """
- if res is None:
- res = self.__socket_config
- if res is None:
- self.error("Cannot create socket to 'None':")
- return False
-
- af, sock_type, proto, canon_name, sa = res
- try:
- self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self._sock = socket.socket(af, sock_type, proto)
- except socket.error as error:
- self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
- port=sa[1],
- error=error))
- self._sock = None
- self.__socket_config = None
- return False
-
- try:
- self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self._sock.connect(sa)
- except socket.error as error:
- self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
- port=sa[1],
- error=error))
- self._disconnect()
- self.__socket_config = None
- return False
-
- self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self.__socket_config = res
- return True
-
- def _connect2unixsocket(self):
- """
- Connect to a unix socket, given its filename
- :return: boolean
- """
- if self.unix_socket is None:
- self.error("cannot connect to unix socket 'None'")
- return False
-
- try:
- self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- self._sock.connect(self.unix_socket)
- self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
- return True
- except socket.error as error:
- self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
- error=error))
-
- try:
- self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self._sock.connect(self.unix_socket)
- self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
- return True
- except socket.error as error:
- self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
- error=error))
- self._sock = None
- return False
-
- def _connect(self):
- """
- Recreate socket and connect to it since sockets cannot be reused after closing
- Available configurations are IPv6, IPv4 or UNIX socket
- :return:
- """
- try:
- if self.unix_socket is not None:
- self._connect2unixsocket()
-
- else:
- if self.__socket_config is not None:
- self._connect2socket()
- else:
- if self.dgram_socket:
- sock_type = socket.SOCK_DGRAM
- else:
- sock_type = socket.SOCK_STREAM
- for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
- if self._connect2socket(res):
- break
-
- except Exception:
- self._sock = None
- self.__socket_config = None
-
- if self._sock is not None:
- self._sock.setblocking(0)
- self._sock.settimeout(5)
- self.debug('set socket timeout to: {0}'.format(self._sock.gettimeout()))
-
- def _disconnect(self):
- """
- Close socket connection
- :return:
- """
- if self._sock is not None:
- try:
- self.debug('closing socket')
- self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
- self._sock.close()
- except Exception:
- pass
- self._sock = None
-
- def _send(self):
- """
- Send request.
- :return: boolean
- """
- # Send request if it is needed
- if self.request != self.__empty_request:
- try:
- self.debug('sending request: {0}'.format(self.request))
- self._sock.send(self.request)
- except Exception as error:
- self._socket_error('error sending request: {0}'.format(error))
- self._disconnect()
- return False
- return True
-
- def _receive(self, raw=False):
- """
- Receive data from socket
- :param raw: set `True` to return bytes
- :type raw: bool
- :return: decoded str or raw bytes
- :rtype: str/bytes
- """
- data = "" if not raw else b""
- while True:
- self.debug('receiving response')
- try:
- buf = self._sock.recv(4096)
- except Exception as error:
- self._socket_error('failed to receive response: {0}'.format(error))
- self._disconnect()
- break
-
- if buf is None or len(buf) == 0: # handle server disconnect
- if data == "" or data == b"":
- self._socket_error('unexpectedly disconnected')
- else:
- self.debug('server closed the connection')
- self._disconnect()
- break
-
- self.debug('received data')
- data += buf.decode('utf-8', 'ignore') if not raw else buf
- if self._check_raw_data(data):
- break
-
- self.debug('final response: {0}'.format(data))
- return data
-
- def _get_raw_data(self, raw=False):
- """
- Get raw data with low-level "socket" module.
- :param raw: set `True` to return bytes
- :type raw: bool
- :return: decoded data (str) or raw data (bytes)
- :rtype: str/bytes
- """
- if self._sock is None:
- self._connect()
- if self._sock is None:
- return None
-
- # Send request if it is needed
- if not self._send():
- return None
-
- data = self._receive(raw)
-
- if not self._keep_alive:
- self._disconnect()
-
- return data
-
- @staticmethod
- def _check_raw_data(data):
- """
- Check if all data has been gathered from socket
- :param data: str
- :return: boolean
- """
- return bool(data)
-
- def _parse_config(self):
- """
- Parse configuration data
- :return: boolean
- """
- try:
- self.unix_socket = str(self.configuration['socket'])
- except (KeyError, TypeError):
- self.debug('No unix socket specified. Trying TCP/IP socket.')
- self.unix_socket = None
- try:
- self.host = str(self.configuration['host'])
- except (KeyError, TypeError):
- self.debug('No host specified. Using: "{0}"'.format(self.host))
- try:
- self.port = int(self.configuration['port'])
- except (KeyError, TypeError):
- self.debug('No port specified. Using: "{0}"'.format(self.port))
-
- try:
- self.request = str(self.configuration['request'])
- except (KeyError, TypeError):
- self.debug('No request specified. Using: "{0}"'.format(self.request))
-
- self.request = self.request.encode()
-
- def check(self):
- self._parse_config()
- return SimpleService.check(self)
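
A hypothetical SocketService consumer usually just sets host, port and request, parses the reply in _get_data(), and overrides _check_raw_data() when the server keeps the connection open after answering. Sketch with an invented line-oriented protocol; assumes the python.d plugin environment:

    # Hypothetical SocketService-based module (sketch; the "stats" protocol is invented).
    from bases.FrameworkServices.SocketService import SocketService

    ORDER = ['queued']
    CHARTS = {
        'queued': {
            'options': [None, 'Queued jobs', 'jobs', 'queue', 'example.queued', 'line'],
            'lines': [
                ['queued', None, 'absolute']
            ]}
    }

    class Service(SocketService):
        def __init__(self, configuration=None, name=None):
            SocketService.__init__(self, configuration=configuration, name=name)
            self.order = ORDER
            self.definitions = CHARTS
            self.host = self.configuration.get('host', '127.0.0.1')
            self.port = self.configuration.get('port', 12345)
            self.request = 'stats'       # _parse_config() encodes this to bytes

        def _get_data(self):
            raw = self._get_raw_data()   # str, or None on socket errors
            if not raw:
                return None
            data = dict()
            for line in raw.splitlines():
                key, _, value = line.partition(' ')
                if key == 'queued' and value.isdigit():
                    data['queued'] = int(value)
            return data or None
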
diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/python.d/python_modules/bases/FrameworkServices/UrlService.py
deleted file mode 100644
index bb340ba3b..000000000
--- a/python.d/python_modules/bases/FrameworkServices/UrlService.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-
-import urllib3
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- urllib3.disable_warnings()
-except AttributeError:
- pass
-
-
-class UrlService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.proxy_user = self.configuration.get('proxy_user')
- self.proxy_password = self.configuration.get('proxy_pass')
- self.proxy_url = self.configuration.get('proxy_url')
- self.header = self.configuration.get('header')
- self.request_timeout = self.configuration.get('timeout', 1)
- self._manager = None
-
- def __make_headers(self, **header_kw):
- user = header_kw.get('user') or self.user
- password = header_kw.get('pass') or self.password
- proxy_user = header_kw.get('proxy_user') or self.proxy_user
- proxy_password = header_kw.get('proxy_pass') or self.proxy_password
- custom_header = header_kw.get('header') or self.header
- header_params = dict(keep_alive=True)
- proxy_header_params = dict()
- if user and password:
- header_params['basic_auth'] = '{user}:{password}'.format(user=user,
- password=password)
- if proxy_user and proxy_password:
- proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
- password=proxy_password)
- try:
- header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
- except TypeError as error:
- self.error('build_header() error: {error}'.format(error=error))
- return None, None
- else:
- header.update(custom_header or dict())
- return header, proxy_header
-
- def _build_manager(self, **header_kw):
- header, proxy_header = self.__make_headers(**header_kw)
- if header is None or proxy_header is None:
- return None
- proxy_url = header_kw.get('proxy_url') or self.proxy_url
- if proxy_url:
- manager = urllib3.ProxyManager
- params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
- else:
- manager = urllib3.PoolManager
- params = dict(headers=header)
- try:
- url = header_kw.get('url') or self.url
- if url.startswith('https'):
- return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params)
- return manager(**params)
- except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
- self.error('build_manager() error:', str(error))
- return None
-
- def _get_raw_data(self, url=None, manager=None):
- """
- Get raw data from http request
- :return: str
- """
- try:
- status, data = self._get_raw_data_with_status(url, manager)
- except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
- self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
- return None
-
- if status == 200:
- return data.decode()
- else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url, code=status))
- return None
-
- def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True):
- """
-        Get status and response body content from an HTTP request. Does not catch exceptions.
- :return: int, str
- """
- url = url or self.url
- manager = manager or self._manager
- response = manager.request(method='GET',
- url=url,
- timeout=self.request_timeout,
- retries=retries,
- headers=manager.headers,
- redirect=redirect)
- return response.status, response.data
-
- def check(self):
- """
- Format configuration data and try to connect to server
- :return: boolean
- """
- if not (self.url and isinstance(self.url, str)):
- self.error('URL is not defined or type is not <str>')
- return False
-
- self._manager = self._build_manager()
- if not self._manager:
- return False
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error('_get_data() returned no data or type is not <dict>')
- return False
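For context, a minimal sketch of how a collector module would typically build on the UrlService base class removed above (a hypothetical 'myapp' module with an assumed status endpoint; assumes the python.d framework packages are importable by the plugin). The subclass only supplies chart definitions and a _get_data() method; check() and the urllib3 plumbing come from UrlService:

    # myapp.chart.py - hypothetical python.d collector built on UrlService
    from bases.FrameworkServices.UrlService import UrlService

    ORDER = ['requests']
    CHARTS = {
        'requests': {
            'options': [None, 'Requests', 'requests/s', 'requests', 'myapp.requests', 'line'],
            'lines': [['requests', None, 'incremental']],
        },
    }

    class Service(UrlService):
        def __init__(self, configuration=None, name=None):
            UrlService.__init__(self, configuration=configuration, name=name)
            self.order = ORDER
            self.definitions = CHARTS
            # 'url' normally comes from the job configuration; this is only a fallback
            self.url = self.configuration.get('url', 'http://127.0.0.1:8080/status')

        def _get_data(self):
            raw = self._get_raw_data()  # GET self.url via the urllib3 manager built in check()
            if raw is None:
                return None
            # check() expects a non-empty dict on success
            return {'requests': int(raw.strip())}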
diff --git a/python.d/python_modules/bases/FrameworkServices/__init__.py b/python.d/python_modules/bases/FrameworkServices/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/bases/FrameworkServices/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/bases/__init__.py b/python.d/python_modules/bases/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/bases/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/bases/charts.py b/python.d/python_modules/bases/charts.py
deleted file mode 100644
index 5394fbf64..000000000
--- a/python.d/python_modules/bases/charts.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (l2isbad)
-
-from bases.collection import safe_print
-
-CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type']
-DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
-VARIABLE_PARAMS = ['id', 'value']
-
-CHART_TYPES = ['line', 'area', 'stacked']
-DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
-
-CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
-CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '' 'python.d.plugin' '{module_name}'\n"
-CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} 'obsolete'\n"
-
-
-DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n"
-DIMENSION_SET = "SET '{id}' = {value}\n"
-
-CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
-
-RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
- "netdata.pythond_runtime line 145000 {update_every}\n" \
- "DIMENSION run_time 'run time' absolute 1 1\n"
-
-
-def create_runtime_chart(func):
- """
- Calls a wrapped function, then prints runtime chart to stdout.
-
- Used as a decorator for SimpleService.create() method.
-    The whole point of implementing the 'create runtime chart' functionality as a decorator
-    is to support users who re-implement create() in their own classes.
-
- :param func: class method
- :return:
- """
- def wrapper(*args, **kwargs):
- self = args[0]
- ok = func(*args, **kwargs)
- if ok:
- safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
- update_every=self._runtime_counters.FREQ))
- return ok
- return wrapper
-
-
-class ChartError(Exception):
- """Base-class for all exceptions raised by this module"""
-
-
-class DuplicateItemError(ChartError):
- """Occurs when user re-adds a chart or a dimension that has already been added"""
-
-
-class ItemTypeError(ChartError):
- """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
-
-
-class ItemValueError(ChartError):
- """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
-
-
-class Charts:
- """Represent a collection of charts
-
-    All charts are stored in a dict.
-    Each chart is an instance of the Chart class.
-    Charts must be added using the Charts.add_chart() method only."""
- def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
- """
- :param job_name: <bound method>
- :param priority: <int>
- :param get_update_every: <bound method>
- """
- self.job_name = job_name
- self.priority = priority
- self.cleanup = cleanup
- self.get_update_every = get_update_every
- self.module_name = module_name
- self.charts = dict()
-
- def __len__(self):
- return len(self.charts)
-
- def __iter__(self):
- return iter(self.charts.values())
-
- def __repr__(self):
- return 'Charts({0})'.format(self)
-
- def __str__(self):
- return str([chart for chart in self.charts])
-
- def __contains__(self, item):
- return item in self.charts
-
- def __getitem__(self, item):
- return self.charts[item]
-
- def __delitem__(self, key):
- del self.charts[key]
-
- def __bool__(self):
- return bool(self.charts)
-
- def __nonzero__(self):
- return self.__bool__()
-
- def add_chart(self, params):
- """
- Create Chart instance and add it to the dict
-
- Manually adds job name, priority and update_every to params.
- :param params: <list>
- :return:
- """
- params = [self.job_name()] + params
- new_chart = Chart(params)
-
- new_chart.params['update_every'] = self.get_update_every()
- new_chart.params['priority'] = self.priority
- new_chart.params['module_name'] = self.module_name
-
- self.priority += 1
- self.charts[new_chart.id] = new_chart
-
- return new_chart
-
- def active_charts(self):
- return [chart.id for chart in self if not chart.flags.obsoleted]
-
-
-class Chart:
- """Represent a chart"""
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'chart' must be a list type")
- if not len(params) >= 8:
- raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
-
- self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
- self.name = '{type}.{id}'.format(type=self.params['type'],
- id=self.params['id'])
- if self.params.get('chart_type') not in CHART_TYPES:
- self.params['chart_type'] = 'absolute'
-
- self.dimensions = list()
- self.variables = set()
- self.flags = ChartFlags()
- self.penalty = 0
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __repr__(self):
- return 'Chart({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __iter__(self):
- return iter(self.dimensions)
-
- def __contains__(self, item):
- return item in [dimension.id for dimension in self.dimensions]
-
- def add_variable(self, variable):
- """
- :param variable: <list>
- :return:
- """
- self.variables.add(ChartVariable(variable))
-
- def add_dimension(self, dimension):
- """
- :param dimension: <list>
- :return:
- """
- dim = Dimension(dimension)
-
- if dim.id in self:
- raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
- chart=self.name))
- self.refresh()
- self.dimensions.append(dim)
- return dim
-
- def hide_dimension(self, dimension_id, reverse=False):
- if dimension_id in self:
- idx = self.dimensions.index(dimension_id)
- dimension = self.dimensions[idx]
- dimension.params['hidden'] = 'hidden' if not reverse else str()
- self.refresh()
-
- def create(self):
- """
- :return:
- """
- chart = CHART_CREATE.format(**self.params)
- dimensions = ''.join([dimension.create() for dimension in self.dimensions])
- variables = ''.join([var.set(var.value) for var in self.variables if var])
-
- self.flags.push = False
- self.flags.created = True
-
- safe_print(chart + dimensions + variables)
-
- def can_be_updated(self, data):
- for dim in self.dimensions:
- if dim.get_value(data) is not None:
- return True
- return False
-
- def update(self, data, interval):
- updated_dimensions, updated_variables = str(), str()
-
- for dim in self.dimensions:
- value = dim.get_value(data)
- if value is not None:
- updated_dimensions += dim.set(value)
-
- for var in self.variables:
- value = var.get_value(data)
- if value is not None:
- updated_variables += var.set(value)
-
- if updated_dimensions:
- since_last = interval if self.flags.updated else 0
-
- if self.flags.push:
- self.create()
-
- chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
- safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
-
- self.flags.updated = True
- self.penalty = 0
- else:
- self.penalty += 1
- self.flags.updated = False
-
- return bool(updated_dimensions)
-
- def obsolete(self):
- self.flags.obsoleted = True
- if self.flags.created:
- safe_print(CHART_OBSOLETE.format(**self.params))
-
- def refresh(self):
- self.penalty = 0
- self.flags.push = True
- self.flags.obsoleted = False
-
-
-class Dimension:
- """Represent a dimension"""
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'dimension' must be a list type")
- if not params:
- raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
-
- self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
- self.params['name'] = self.params.get('name') or self.params['id']
-
- if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
- self.params['algorithm'] = 'absolute'
- if not isinstance(self.params.get('multiplier'), int):
- self.params['multiplier'] = 1
- if not isinstance(self.params.get('divisor'), int):
- self.params['divisor'] = 1
- self.params.setdefault('hidden', '')
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __repr__(self):
- return 'Dimension({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __eq__(self, other):
- if not isinstance(other, Dimension):
- return self.id == other
- return self.id == other.id
-
- def create(self):
- return DIMENSION_CREATE.format(**self.params)
-
- def set(self, value):
- """
- :param value: <str>: must be a digit
- :return:
- """
- return DIMENSION_SET.format(id=self.id,
- value=value)
-
- def get_value(self, data):
- try:
- return int(data[self.id])
- except (KeyError, TypeError):
- return None
-
-
-class ChartVariable:
- """Represent a chart variable"""
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'variable' must be a list type")
- if not params:
- raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
-
- self.params = dict(zip(VARIABLE_PARAMS, params))
- self.params.setdefault('value', None)
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __bool__(self):
- return self.value is not None
-
- def __nonzero__(self):
- return self.__bool__()
-
- def __repr__(self):
- return 'ChartVariable({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __eq__(self, other):
- if isinstance(other, ChartVariable):
- return self.id == other.id
- return False
-
- def __hash__(self):
- return hash(repr(self))
-
- def set(self, value):
- return CHART_VARIABLE_SET.format(id=self.id,
- value=value)
-
- def get_value(self, data):
- try:
- return int(data[self.id])
- except (KeyError, TypeError):
- return None
-
-
-class ChartFlags:
- def __init__(self):
- self.push = True
- self.created = False
- self.updated = False
- self.obsoleted = False
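As a quick illustration of the API removed above, a Dimension is built from a list that maps onto DIMENSION_PARAMS, and create()/set() render the corresponding plugin-protocol lines (a minimal sketch; assumes the bases package is importable):

    from bases.charts import Dimension

    dim = Dimension(['used', 'memory used', 'absolute', 1, 1024])
    print(dim.create())   # DIMENSION 'used' 'memory used' absolute 1 1024 ''
    print(dim.set(4096))  # SET 'used' = 4096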
diff --git a/python.d/python_modules/bases/collection.py b/python.d/python_modules/bases/collection.py
deleted file mode 100644
index e03b4f58e..000000000
--- a/python.d/python_modules/bases/collection.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (l2isbad)
-
-import os
-
-PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
-
-CHART_BEGIN = 'BEGIN {0} {1}\n'
-CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
-DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
-DIMENSION_SET = "SET '{0}' = {1}\n"
-
-
-def setdefault_values(config, base_dict):
- for key, value in base_dict.items():
- config.setdefault(key, value)
- return config
-
-
-def run_and_exit(func):
- def wrapper(*args, **kwargs):
- func(*args, **kwargs)
- exit(1)
- return wrapper
-
-
-def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
- except_func = on_except[0]
- finally_func = on_finally[0]
-
- def decorator(func):
- def wrapper(*args, **kwargs):
- try:
- func(*args, **kwargs)
- except Exception:
- if except_func:
- except_func(*on_except[1:])
- finally:
- if finally_func:
- finally_func(*on_finally[1:])
- return wrapper
- return decorator
-
-
-def static_vars(**kwargs):
- def decorate(func):
- for k in kwargs:
- setattr(func, k, kwargs[k])
- return func
- return decorate
-
-
-@on_try_except_finally(on_except=(exit, 1))
-def safe_print(*msg):
- """
- :param msg:
- :return:
- """
- print(''.join(msg))
-
-
-def find_binary(binary):
- """
- :param binary: <str>
- :return:
- """
- for directory in PATH:
- binary_name = '/'.join([directory, binary])
- if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
- return binary_name
- return None
-
-
-def read_last_line(f):
- with open(f, 'rb') as opened:
- opened.seek(-2, 2)
- while opened.read(1) != b'\n':
- opened.seek(-2, 1)
- if opened.tell() == 0:
- break
- result = opened.readline()
- return result.decode()
-
-
-class OldVersionCompatibility:
-
- def __init__(self):
- self._data_stream = str()
-
- def begin(self, type_id, microseconds=0):
- """
- :param type_id: <str>
- :param microseconds: <str> or <int>: must be a digit
- :return:
- """
- self._data_stream += CHART_BEGIN.format(type_id, microseconds)
-
- def set(self, dim_id, value):
- """
- :param dim_id: <str>
- :param value: <int> or <str>: must be a digit
- :return:
- """
- self._data_stream += DIMENSION_SET.format(dim_id, value)
-
- def end(self):
- self._data_stream += 'END\n'
-
- def chart(self, type_id, name='', title='', units='', family='', category='', chart_type='line',
- priority='', update_every=''):
- """
- :param type_id: <str>
- :param name: <str>
- :param title: <str>
- :param units: <str>
- :param family: <str>
- :param category: <str>
- :param chart_type: <str>
- :param priority: <str> or <int>
- :param update_every: <str> or <int>
- :return:
- """
- self._data_stream += CHART_CREATE.format(type_id, name, title, units,
- family, category, chart_type,
- priority, update_every)
-
- def dimension(self, dim_id, name=None, algorithm="absolute", multiplier=1, divisor=1, hidden=False):
- """
- :param dim_id: <str>
- :param name: <str> or None
- :param algorithm: <str>
- :param multiplier: <str> or <int>: must be a digit
- :param divisor: <str> or <int>: must be a digit
- :param hidden: <str>: literally "hidden" or ""
- :return:
- """
- self._data_stream += DIMENSION_CREATE.format(dim_id, name or dim_id, algorithm,
- multiplier, divisor, hidden or str())
-
- @on_try_except_finally(on_except=(exit, 1))
- def commit(self):
- print(self._data_stream)
- self._data_stream = str()
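Typical use of the helpers removed above from collection.py (a sketch; find_binary walks the PATH list defined at the top of the module, and the log path shown is hypothetical):

    from bases.collection import find_binary, read_last_line

    exim = find_binary('exim')  # e.g. '/usr/sbin/exim', or None if not found or not executable
    if exim:
        last = read_last_line('/var/log/exim4/mainlog')  # hypothetical log path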
diff --git a/python.d/python_modules/bases/loaders.py b/python.d/python_modules/bases/loaders.py
deleted file mode 100644
index d18b9dcd0..000000000
--- a/python.d/python_modules/bases/loaders.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (l2isbad)
-
-import types
-from sys import version_info
-
-PY_VERSION = version_info[:2]
-
-if PY_VERSION > (3, 1):
- from pyyaml3 import SafeLoader as YamlSafeLoader
- from importlib.machinery import SourceFileLoader
- DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-else:
- from pyyaml2 import SafeLoader as YamlSafeLoader
- from imp import load_source as SourceFileLoader
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-
-def dict_constructor(loader, node):
- return OrderedDict(loader.construct_pairs(node))
-
-
-YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
-
-
-class YamlOrderedLoader:
- @staticmethod
- def load_config_from_file(file_name):
- opened, loaded = False, False
- try:
- stream = open(file_name, 'r')
- opened = True
- loader = YamlSafeLoader(stream)
- loaded = True
- parsed = loader.get_single_data() or dict()
- except Exception as error:
- return dict(), error
- else:
- return parsed, None
- finally:
- if opened:
- stream.close()
- if loaded:
- loader.dispose()
-
-
-class SourceLoader:
- @staticmethod
- def load_module_from_file(name, path):
- try:
- loaded = SourceFileLoader(name, path)
- if isinstance(loaded, types.ModuleType):
- return loaded, None
- return loaded.load_module(), None
- except Exception as error:
- return None, error
-
-
-class ModuleAndConfigLoader(YamlOrderedLoader, SourceLoader):
- pass
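The loaders above are what the plugin uses to read a job's YAML configuration and to import the module itself; both return a (result, error) tuple instead of raising (a sketch with hypothetical paths):

    from bases.loaders import ModuleAndConfigLoader

    loader = ModuleAndConfigLoader()
    config, error = loader.load_config_from_file('/etc/netdata/python.d/example.conf')
    if error:
        config = dict()
    module, error = loader.load_module_from_file('example',
                                                 '/usr/libexec/netdata/python.d/example.chart.py')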
diff --git a/python.d/python_modules/bases/loggers.py b/python.d/python_modules/bases/loggers.py
deleted file mode 100644
index fc40b83d3..000000000
--- a/python.d/python_modules/bases/loggers.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (l2isbad)
-
-import logging
-import traceback
-
-from sys import exc_info
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.collection import on_try_except_finally
-
-
-LOGGING_LEVELS = {'CRITICAL': 50,
- 'ERROR': 40,
- 'WARNING': 30,
- 'INFO': 20,
- 'DEBUG': 10,
- 'NOTSET': 0}
-
-DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
-DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
-
-PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s: %(job_name)s: %(message)s'
-PYTHON_D_LOG_NAME = 'python.d'
-
-
-def limiter(log_max_count=30, allowed_in_seconds=60):
- def on_decorator(func):
-
- def on_call(*args):
- current_time = args[0]._runtime_counters.START_RUN
- lc = args[0]._logger_counters
-
- if lc.logged and lc.logged % log_max_count == 0:
- if current_time - lc.time_to_compare <= allowed_in_seconds:
- lc.dropped += 1
- return
- lc.time_to_compare = current_time
-
- lc.logged += 1
- func(*args)
-
- return on_call
- return on_decorator
-
-
-def add_traceback(func):
- def on_call(*args):
- self = args[0]
-
- if not self.log_traceback:
- func(*args)
- else:
- if exc_info()[0]:
- func(*args)
- func(self, traceback.format_exc())
- else:
- func(*args)
-
- return on_call
-
-
-class LoggerCounters:
- def __init__(self):
- self.logged = 0
- self.dropped = 0
- self.time_to_compare = time()
-
- def __repr__(self):
- return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged,
- dropped=self.dropped)
-
-
-class BaseLogger(object):
- def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT,
- handler=logging.StreamHandler):
- """
- :param logger_name: <str>
- :param log_fmt: <str>
- :param date_fmt: <str>
- :param handler: <logging handler>
- """
- self.logger = logging.getLogger(logger_name)
- if not self.has_handlers():
- self.severity = 'INFO'
- self.logger.addHandler(handler())
- self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
-
- def __repr__(self):
-        return '<Logger: {name}>'.format(name=self.logger.name)
-
- def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
- """
- :param fmt: <str>
- :param date_fmt: <str>
- :return:
- """
- if self.has_handlers():
- self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
-
- def has_handlers(self):
- return self.logger.handlers
-
- @property
- def severity(self):
- return self.logger.getEffectiveLevel()
-
- @severity.setter
- def severity(self, level):
- """
- :param level: <str> or <int>
- :return:
- """
- if level in LOGGING_LEVELS:
- self.logger.setLevel(LOGGING_LEVELS[level])
-
- def debug(self, *msg, **kwargs):
- self.logger.debug(' '.join(map(str, msg)), **kwargs)
-
- def info(self, *msg, **kwargs):
- self.logger.info(' '.join(map(str, msg)), **kwargs)
-
- def warning(self, *msg, **kwargs):
- self.logger.warning(' '.join(map(str, msg)), **kwargs)
-
- def error(self, *msg, **kwargs):
- self.logger.error(' '.join(map(str, msg)), **kwargs)
-
- def alert(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
-
- @on_try_except_finally(on_finally=(exit, 1))
- def fatal(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
-
-
-class PythonDLogger(object):
- def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT):
- """
- :param logger_name: <str>
- :param log_fmt: <str>
- """
- self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
- self.module_name = 'plugin'
- self.job_name = 'main'
- self._logger_counters = LoggerCounters()
-
- _LOG_TRACEBACK = False
-
- @property
- def log_traceback(self):
- return PythonDLogger._LOG_TRACEBACK
-
- @log_traceback.setter
- def log_traceback(self, value):
- PythonDLogger._LOG_TRACEBACK = value
-
- def debug(self, *msg):
- self.logger.debug(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
- def info(self, *msg):
- self.logger.info(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
- def warning(self, *msg):
- self.logger.warning(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
- @add_traceback
- def error(self, *msg):
- self.logger.error(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
- @add_traceback
- def alert(self, *msg):
- self.logger.alert(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
- def fatal(self, *msg):
- self.logger.fatal(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
-
-class PythonDLimitedLogger(PythonDLogger):
- @limiter()
- def info(self, *msg):
- PythonDLogger.info(self, *msg)
-
- @limiter()
- def warning(self, *msg):
- PythonDLogger.warning(self, *msg)
-
- @limiter()
- def error(self, *msg):
- PythonDLogger.error(self, *msg)
-
- @limiter()
- def alert(self, *msg):
- PythonDLogger.alert(self, *msg)
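A minimal sketch of the logger classes removed above: PythonDLogger tags every record with a module and job name, while PythonDLimitedLogger additionally rate-limits messages via the limiter() decorator (the limited variant reads a _runtime_counters attribute supplied by the service classes, so it is only usable from inside the framework):

    from bases.loggers import PythonDLogger

    log = PythonDLogger()
    log.module_name = 'example'
    log.job_name = 'local'
    log.logger.severity = 'DEBUG'   # severity lives on the wrapped BaseLogger
    log.info('collector started')
    log.error('collection failed')  # appends a traceback only when log_traceback is enabled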
diff --git a/python.d/python_modules/pyyaml2/__init__.py b/python.d/python_modules/pyyaml2/__init__.py
deleted file mode 100644
index 76e19e13f..000000000
--- a/python.d/python_modules/pyyaml2/__init__.py
+++ /dev/null
@@ -1,315 +0,0 @@
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
-__version__ = '3.11'
-
-try:
- from cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- from StringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __metaclass__ = YAMLObjectMetaclass
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
- from_yaml = classmethod(from_yaml)
-
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
- to_yaml = classmethod(to_yaml)
-
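The package above is the bundled Python 2 copy of PyYAML 3.11; within the plugin it is reached only through loaders.py, but it exposes the usual PyYAML entry points (a sketch; assumes Python 2 with python_modules/ on sys.path):

    import pyyaml2 as yaml    # the vendored copy; loaders.py picks 'pyyaml3' on Python 3

    config = yaml.safe_load('update_every: 5\npriority: 90000\n')
    print(config['update_every'])   # 5
    print(yaml.safe_dump(config))   # round-trips using only basic YAML tags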
diff --git a/python.d/python_modules/pyyaml2/composer.py b/python.d/python_modules/pyyaml2/composer.py
deleted file mode 100644
index 06e5ac782..000000000
--- a/python.d/python_modules/pyyaml2/composer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from error import MarkedYAMLError
-from events import *
-from nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer(object):
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
-        # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor.encode('utf-8'), event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurrence"
-                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
-                        "second occurrence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
diff --git a/python.d/python_modules/pyyaml2/constructor.py b/python.d/python_modules/pyyaml2/constructor.py
deleted file mode 100644
index 635faac3e..000000000
--- a/python.d/python_modules/pyyaml2/constructor.py
+++ /dev/null
@@ -1,675 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
-        # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = generator.next()
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
- add_constructor = classmethod(add_constructor)
-
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
- add_multi_constructor = classmethod(add_multi_constructor)
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError), exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- ur'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node).encode('utf-8')
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_long(self, node):
- return long(self.construct_yaml_int(node))
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = '__builtin__'
- object_name = name
- try:
- __import__(module_name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r" % (object_name.encode('utf-8'),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- class classobj: pass
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
-                setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
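The construct_python_object_apply handler removed above accepts either a bare argument sequence or a mapping with args/kwds/state/listitems/dictitems keys, and these python/* tags are only registered on the full Constructor, not on SafeConstructor. A minimal sketch of what that looks like from the caller's side, assuming a standalone PyYAML install (import yaml) rather than this vendored pyyaml2 copy:

    import yaml

    # Short form: !!python/object/apply:callable [args...]
    doc1 = "!!python/object/apply:collections.OrderedDict [[[a, 1], [b, 2]]]"

    # Long form with an explicit args list, as described in the comment above.
    doc2 = """
    !!python/object/apply:complex
    args: [1.0, 2.0]
    """

    # These tags are only honoured by the full (unsafe) Loader.
    print(yaml.load(doc1, Loader=yaml.Loader))   # OrderedDict([('a', 1), ('b', 2)])
    print(yaml.load(doc2, Loader=yaml.Loader))   # (1+2j)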
diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/python.d/python_modules/pyyaml2/cyaml.py
deleted file mode 100644
index 68dcd7519..000000000
--- a/python.d/python_modules/pyyaml2/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
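cyaml.py only works when the libyaml-based _yaml extension module is available, which is why it lives in its own file. The usual consumer-side pattern, sketched here against a standalone PyYAML rather than this vendored tree, is to prefer the C classes and fall back to the pure-Python ones:

    import yaml

    try:
        from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
    except ImportError:  # _yaml / libyaml bindings not built
        from yaml import SafeLoader, SafeDumper

    data = yaml.load("{a: 1, b: [2, 3]}", Loader=SafeLoader)
    text = yaml.dump(data, Dumper=SafeDumper, default_flow_style=False)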
diff --git a/python.d/python_modules/pyyaml2/dumper.py b/python.d/python_modules/pyyaml2/dumper.py
deleted file mode 100644
index f811d2c91..000000000
--- a/python.d/python_modules/pyyaml2/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
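Each Dumper above is just a mixin stack: Emitter handles layout, Serializer handles document framing, the Representer turns Python objects into nodes, and the Resolver picks implicit tags; the constructor keyword arguments are split between them. A hedged usage sketch (standalone PyYAML, sample data invented for illustration):

    import yaml

    # indent/width/allow_unicode go to Emitter, explicit_start to Serializer,
    # default_flow_style to the Representer.
    text = yaml.dump(
        {"name": "netdata", "ports": [19999]},
        Dumper=yaml.SafeDumper,
        indent=4,
        width=60,
        default_flow_style=False,
        explicit_start=True,
        allow_unicode=True,
    )
    print(text)   # starts with '---' because of explicit_start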
diff --git a/python.d/python_modules/pyyaml2/emitter.py b/python.d/python_modules/pyyaml2/emitter.py
deleted file mode 100644
index e5bcdcccb..000000000
--- a/python.d/python_modules/pyyaml2/emitter.py
+++ /dev/null
@@ -1,1140 +0,0 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from error import YAMLError
-from events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter(object):
-
- DEFAULT_TAG_PREFIXES = {
- u'!' : u'!',
- u'tag:yaml.org,2002:' : u'!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
- # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for a few next events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = self.event.tags.keys()
- handles.sort()
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(u':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator(u'-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(u':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (handle.encode('utf-8')))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch.encode('utf-8'), handle.encode('utf-8')))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = self.tag_prefixes.keys()
- prefixes.sort()
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch.encode('utf-8'), anchor.encode('utf-8')))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceeded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar as well.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'#' and preceeded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E'
- or (self.allow_unicode
- and (u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += unicode(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
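The grammar comment at the top of the removed emitter.py (stream ::= STREAM-START document* STREAM-END, and so on) describes the event sequence the Emitter consumes. A small sketch of driving it directly through yaml.emit, assuming a standalone PyYAML; the exact output layout is approximate:

    import sys
    import yaml
    from yaml.events import (StreamStartEvent, StreamEndEvent,
                             DocumentStartEvent, DocumentEndEvent,
                             MappingStartEvent, MappingEndEvent, ScalarEvent)

    # One stream, one implicit document, one single-pair block mapping.
    events = [
        StreamStartEvent(),
        DocumentStartEvent(explicit=False),
        MappingStartEvent(anchor=None, tag=None, implicit=True),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='hello'),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='world'),
        MappingEndEvent(),
        DocumentEndEvent(explicit=False),
        StreamEndEvent(),
    ]

    yaml.emit(events, sys.stdout)   # prints: hello: world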
diff --git a/python.d/python_modules/pyyaml2/error.py b/python.d/python_modules/pyyaml2/error.py
deleted file mode 100644
index 577686db5..000000000
--- a/python.d/python_modules/pyyaml2/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark(object):
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end].encode('utf-8')
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
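Mark.get_snippet and MarkedYAMLError.__str__ above are what produce the familiar two-part PyYAML error messages with a caret pointing into the source. A quick sketch of what a caller sees (standalone PyYAML; the exact wording depends on which stage raises):

    import yaml

    try:
        yaml.safe_load("key: [unclosed")
    except yaml.MarkedYAMLError as err:
        print(err)                  # context, problem and the caret snippet
        mark = err.problem_mark     # a Mark: name, line, column, ...
        print(mark.line, mark.column)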
diff --git a/python.d/python_modules/pyyaml2/events.py b/python.d/python_modules/pyyaml2/events.py
deleted file mode 100644
index f79ad389c..000000000
--- a/python.d/python_modules/pyyaml2/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
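The event classes above are what the parser yields and the emitter consumes. A short sketch using yaml.parse from a standalone PyYAML to see the sequence for a trivial document:

    import yaml

    for event in yaml.parse("a: 1"):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent ('a'), ScalarEvent ('1'), MappingEndEvent,
    # DocumentEndEvent, StreamEndEvent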
diff --git a/python.d/python_modules/pyyaml2/loader.py b/python.d/python_modules/pyyaml2/loader.py
deleted file mode 100644
index 293ff467b..000000000
--- a/python.d/python_modules/pyyaml2/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
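The three loaders above differ only in which constructor mixin they include; SafeLoader deliberately lacks the python/* constructors registered on the full Constructor. A hedged sketch of the practical difference, against a standalone PyYAML:

    import yaml
    from yaml.constructor import ConstructorError

    doc = "!!python/object/apply:os.system [echo hello]"

    # SafeLoader has no constructor for python/* tags and refuses the document.
    try:
        yaml.load(doc, Loader=yaml.SafeLoader)
    except ConstructorError as err:
        print("rejected:", err.problem)

    # The full Loader (Constructor + Resolver) would resolve the tag and call
    # os.system, which is why untrusted input should go through safe_load.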
diff --git a/python.d/python_modules/pyyaml2/nodes.py b/python.d/python_modules/pyyaml2/nodes.py
deleted file mode 100644
index c4f070c41..000000000
--- a/python.d/python_modules/pyyaml2/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
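Nodes are the intermediate representation between composing and constructing. yaml.compose (standalone PyYAML) stops after the Composer stage and returns this node graph, which is a handy way to see the classes above in use:

    import yaml

    node = yaml.compose("a: [1, 2]")
    print(type(node).__name__, node.tag)   # MappingNode tag:yaml.org,2002:map
    key_node, value_node = node.value[0]   # a mapping's value is a list of pairs
    print(key_node.value, value_node.id)   # a sequence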
diff --git a/python.d/python_modules/pyyaml2/parser.py b/python.d/python_modules/pyyaml2/parser.py
deleted file mode 100644
index f9e3057f3..000000000
--- a/python.d/python_modules/pyyaml2/parser.py
+++ /dev/null
@@ -1,589 +0,0 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser(object):
- # Since writing a recursive-descendant parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
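
The removed parser is normally exercised through PyYAML's public entry points rather than instantiated directly. A minimal sketch of the event stream it produces, assuming a system PyYAML that exposes the same API as this bundled pyyaml2 copy; the sample document is purely illustrative:

    import yaml  # assumes a PyYAML installation equivalent to the removed bundle

    document = "server:\n  host: localhost\n  port: 19999\n"
    # yaml.parse() drives the parse_* methods above and yields the events named
    # in the grammar comments (StreamStart, DocumentStart, MappingStart, Scalar, ...).
    for event in yaml.parse(document):
        print(type(event).__name__)
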
diff --git a/python.d/python_modules/pyyaml2/reader.py b/python.d/python_modules/pyyaml2/reader.py
deleted file mode 100644
index 3249e6b9f..000000000
--- a/python.d/python_modules/pyyaml2/reader.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further, there is no pretty code.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - advance the current position by `length` characters.
-# reader.index - the number of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, str):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to unicode,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object,
- # - a `unicode` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, unicode):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, str):
- self.name = "<string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = ''
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and len(self.raw_buffer) < 2:
- self.update_raw()
- if not isinstance(self.raw_buffer, unicode):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError, exc:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=1024):
- data = self.stream.read(size)
- if data:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- else:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
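
The removed Reader is a small cursor over the decoded character stream. A rough usage sketch, assuming a system PyYAML whose yaml.reader.Reader matches the deleted class above:

    from yaml.reader import Reader  # assumed equivalent to the removed class

    reader = Reader("key: value\n")
    first = reader.peek()      # look at the next character without consuming it
    reader.forward(4)          # advance past "key:" while index/line/column are tracked
    mark = reader.get_mark()   # Mark used for the error messages produced elsewhere
    print(first, mark.line, mark.column)
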
diff --git a/python.d/python_modules/pyyaml2/representer.py b/python.d/python_modules/pyyaml2/representer.py
deleted file mode 100644
index 5f4fc70db..000000000
--- a/python.d/python_modules/pyyaml2/representer.py
+++ /dev/null
@@ -1,484 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import sys, copy_reg, types
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if type(data) is types.InstanceType:
- data_types = self.get_classobj_bases(data.__class__)+list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, unicode(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
- add_representer = classmethod(add_representer)
-
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
- add_multi_representer = classmethod(add_multi_representer)
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = mapping.items()
- mapping.sort()
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, unicode, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed by
- # calling cls(**args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never be
- # called and the class instance is created by instantiating a trivial
- # class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary, we
- # produce a !!python/object node. Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:'+class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
- Representer.represent_str)
-
-Representer.add_representer(unicode,
- Representer.represent_unicode)
-
-Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
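
The add_representer registrations above are the same hook applications use to teach the dumper about their own types. A hedged example with a hypothetical Point class and !point tag (neither exists in netdata or PyYAML); it assumes a system PyYAML with the same yaml.add_representer API:

    import yaml

    class Point(object):                      # illustrative type only
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, data):
        # reuse represent_mapping(), just like the represent_* helpers above
        return dumper.represent_mapping(u'!point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)
    print(yaml.dump(Point(1, 2)))             # emits a !point mapping
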
diff --git a/python.d/python_modules/pyyaml2/resolver.py b/python.d/python_modules/pyyaml2/resolver.py
deleted file mode 100644
index 6b5ab8759..000000000
--- a/python.d/python_modules/pyyaml2/resolver.py
+++ /dev/null
@@ -1,224 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver(object):
-
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
- add_implicit_resolver = classmethod(add_implicit_resolver)
-
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key which content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, basestring) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (basestring, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
- add_path_resolver = classmethod(add_path_resolver)
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, basestring):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, basestring):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(ur'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(ur'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(ur'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(ur'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(ur'^(?:!|&|\*)$'),
- list(u'!&*'))
-
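
The implicit resolvers registered above map plain scalars to tags by regular expression. The same mechanism is available to callers; a sketch with a hypothetical !percent tag, assuming a system PyYAML with the same yaml.add_implicit_resolver API:

    import re
    import yaml

    # Resolve strings such as "80%" to !percent, mirroring the built-in
    # bool/int/float/null/timestamp resolvers registered above.
    yaml.add_implicit_resolver(u'!percent', re.compile(r'^[0-9]+%$'),
                               list(u'0123456789'))

    node = yaml.compose('cpu: 80%')   # compose() stops at the node tree,
    value_node = node.value[0][1]     # so no constructor is needed
    print(value_node.tag)             # -> !percent
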
diff --git a/python.d/python_modules/pyyaml2/scanner.py b/python.d/python_modules/pyyaml2/scanner.py
deleted file mode 100644
index 5228fad65..000000000
--- a/python.d/python_modules/pyyaml2/scanner.py
+++ /dev/null
@@ -1,1457 +0,0 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from error import MarkedYAMLError
-from tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey(object):
- # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader do the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % ch.encode('utf-8'), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in self.possible_simple_keys.keys():
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid indentation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there cannot be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
- # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
- and (ch == u'-' or (not self.flow_level and ch in u'?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
-        # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
-        # Note that we relax indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark their beginning and end. Therefore we are less
-        # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                    "expected escape sequence of %d hexadecimal digits, but found %r" %
- (length, self.peek(k).encode('utf-8')), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(unichr(code))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are relaxed for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in u',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':'
- and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected URI escape sequence of 2 hexadecimal digits, but found %r" %
- (self.peek(k).encode('utf-8')), self.get_mark())
- bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- value = unicode(''.join(bytes), 'utf-8')
- except UnicodeDecodeError, exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
-        #   '\u2029'    :   '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
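
For reference, the Scanner methods deleted above form the tokenizer stage of this
bundled PyYAML copy. A minimal sketch of driving that stage through the public
API, assuming a stock PyYAML installation (the sample document is invented for
illustration):

    import yaml

    sample = "server:\n  host: localhost\n  port: 19999\n"
    # yaml.scan() runs only the scanner and yields the raw token stream
    # (StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken, ...).
    for token in yaml.scan(sample):
        print(type(token).__name__)
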
diff --git a/python.d/python_modules/pyyaml2/serializer.py b/python.d/python_modules/pyyaml2/serializer.py
deleted file mode 100644
index 0bf1e96dc..000000000
--- a/python.d/python_modules/pyyaml2/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer(object):
-
- ANCHOR_TEMPLATE = u'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
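
The Serializer deleted above is the counterpart of the Composer: it walks a
representation tree, assigns anchors to nodes referenced more than once, and
emits the corresponding events. A small sketch of exercising it via the public
helpers, assuming a stock PyYAML installation:

    import yaml

    # compose() builds a representation tree; serialize() feeds it back
    # through Serializer.open()/serialize()/close() and returns YAML text.
    node = yaml.compose("a: 1\nb: [2, 3]\n")
    print(yaml.serialize(node))
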
diff --git a/python.d/python_modules/pyyaml2/tokens.py b/python.d/python_modules/pyyaml2/tokens.py
deleted file mode 100644
index 4d0b48a39..000000000
--- a/python.d/python_modules/pyyaml2/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
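
The token classes deleted above are plain data carriers consumed by the parser;
ScalarToken additionally records the value, plainness, style and source marks.
A quick look at them, assuming a stock PyYAML installation (the input string is
made up):

    import yaml

    tokens = list(yaml.scan("key: value"))
    scalar = next(t for t in tokens if isinstance(t, yaml.ScalarToken))
    # Token.__repr__ omits the marks; the marks themselves carry line/column.
    print(scalar)    # ScalarToken(plain=True, style=None, value='key')
    print(scalar.start_mark.line, scalar.start_mark.column)
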
diff --git a/python.d/python_modules/pyyaml3/__init__.py b/python.d/python_modules/pyyaml3/__init__.py
deleted file mode 100644
index a5e20f94d..000000000
--- a/python.d/python_modules/pyyaml3/__init__.py
+++ /dev/null
@@ -1,312 +0,0 @@
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.11'
-try:
- from .cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- stream = io.StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
-    Add a multi-representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(metaclass=YAMLObjectMetaclass):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- @classmethod
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
-
- @classmethod
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
-
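
The module deleted above is the public entry point of the bundled Python 3 copy:
yaml.load/dump, the add_* registration helpers, and YAMLObject. A hedged
illustration of the YAMLObject pattern its docstrings describe, assuming a stock
PyYAML installation (the Service class, its tag and the values are invented):

    import yaml

    class Service(yaml.YAMLObject):
        # Setting yaml_tag lets YAMLObjectMetaclass register from_yaml/to_yaml
        # with the default Loader and Dumper.
        yaml_tag = '!Service'

        def __init__(self, name, port):
            self.name = name
            self.port = port

    svc = yaml.load("!Service {name: netdata, port: 19999}", Loader=yaml.Loader)
    print(svc.name, svc.port)    # -> netdata 19999
    print(yaml.dump(svc))        # round-trips back to the !Service tag
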
diff --git a/python.d/python_modules/pyyaml3/composer.py b/python.d/python_modules/pyyaml3/composer.py
deleted file mode 100644
index d5c6a7acd..000000000
--- a/python.d/python_modules/pyyaml3/composer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer:
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
-        # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor, event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurrence"
-                        % anchor, self.anchors[anchor].start_mark,
-                        "second occurrence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == '!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
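
The Composer deleted above turns the event stream into a node tree and resolves
an alias by returning the very node object recorded for its anchor. A small
sketch that makes this visible, assuming a stock PyYAML installation:

    import yaml

    root = yaml.compose("defaults: &base {retries: 3}\njob: *base\n")
    (_, anchored), (_, aliased) = root.value
    # compose_node() returns the stored node for *base, so both mapping
    # values are the same MappingNode instance.
    print(anchored is aliased)    # -> True
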
diff --git a/python.d/python_modules/pyyaml3/constructor.py b/python.d/python_modules/pyyaml3/constructor.py
deleted file mode 100644
index 981543aeb..000000000
--- a/python.d/python_modules/pyyaml3/constructor.py
+++ /dev/null
@@ -1,686 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor:
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
-        # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = next(generator)
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- if not isinstance(key, collections.Hashable):
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- @classmethod
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
-
- @classmethod
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == 'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return super().construct_scalar(node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == 'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == 'tag:yaml.org,2002:value':
- key_node.tag = 'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return super().construct_mapping(node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- 'yes': True,
- 'no': False,
- 'true': True,
- 'false': False,
- 'on': True,
- 'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- r'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- return self.construct_scalar(node)
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag,
- node.start_mark)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node)
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_bytes(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- def construct_python_long(self, node):
- return self.construct_yaml_int(node)
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name, exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if '.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = 'builtins'
- object_name = name
- try:
- __import__(module_name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name, exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r"
- % (object_name, module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
-                setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
-        # is how an object is created; see make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
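
The add_constructor calls above wire each tag:yaml.org,2002:* tag to a SafeConstructor method, and the Constructor subclass layers the python/* tags on top. A minimal sketch of the practical difference, using the public PyYAML API that this vendored copy provides (the documents below are illustrative, not taken from netdata configs):

    import yaml

    safe_doc = """
    flags: !!set {a: null, b: null}
    when: 2018-11-07 12:19:29
    blob: !!binary "aGVsbG8="
    """
    # safe_load uses SafeConstructor, so only the tag:yaml.org,2002:* tags
    # registered above are available.
    data = yaml.safe_load(safe_doc)
    # data['flags'] is a set, data['when'] a datetime, data['blob'] bytes

    py_doc = """
    point: !!python/object/apply:complex [1.0, 2.0]
    names: !!python/tuple [a, b]
    """
    # The full Loader uses Constructor and therefore resolves python/* tags.
    # It can instantiate arbitrary Python objects, so never feed it untrusted input.
    obj = yaml.load(py_doc)   # PyYAML 3.x default Loader; newer versions need Loader=yaml.Loader
    # obj['point'] == (1+2j), obj['names'] == ('a', 'b')
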
diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/python.d/python_modules/pyyaml3/cyaml.py
deleted file mode 100644
index d5cb87e99..000000000
--- a/python.d/python_modules/pyyaml3/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
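
These C-accelerated classes only pair the libyaml CParser/CEmitter with the pure-Python constructors and representers, and they import only when the _yaml extension module is installed. A small sketch of the usual fallback pattern (the file name is hypothetical):

    import yaml

    try:
        from yaml import CSafeLoader as Loader   # libyaml-backed, needs the _yaml extension
    except ImportError:
        from yaml import SafeLoader as Loader    # pure-Python fallback

    with open('example.conf.yaml') as handle:    # hypothetical config file
        config = yaml.load(handle, Loader=Loader)
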
diff --git a/python.d/python_modules/pyyaml3/dumper.py b/python.d/python_modules/pyyaml3/dumper.py
deleted file mode 100644
index 0b6912877..000000000
--- a/python.d/python_modules/pyyaml3/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
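
Each dumper here is just Emitter + Serializer + a representer + a resolver glued together in __init__, and the keyword arguments accepted there are the same ones yaml.dump forwards. A minimal sketch assuming the SafeDumper defined above:

    import yaml

    data = {'service': 'netdata', 'ports': [19999], 'enabled': True}
    text = yaml.dump(data,
                     Dumper=yaml.SafeDumper,    # SafeRepresenter: plain types only
                     default_flow_style=False,  # block-style collections
                     indent=2, width=72)
    print(text)
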
diff --git a/python.d/python_modules/pyyaml3/emitter.py b/python.d/python_modules/pyyaml3/emitter.py
deleted file mode 100644
index 34cb145a5..000000000
--- a/python.d/python_modules/pyyaml3/emitter.py
+++ /dev/null
@@ -1,1137 +0,0 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis:
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter:
-
- DEFAULT_TAG_PREFIXES = {
- '!' : '!',
- 'tag:yaml.org,2002:' : '!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
-        # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = '\n'
- if line_break in ['\r', '\n', '\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
-    # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not hasattr(self.stream, 'encoding'):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = sorted(self.event.tags.keys())
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator('---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator('...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor('&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor('*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator('[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator('{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator('-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == '')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = '!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return '%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != '!' or handle[-1] != '!':
- raise EmitterError("tag handle must start and end with '!': %r" % handle)
- for ch in handle[1:-1]:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch, handle))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == '!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return ''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == '!':
- return tag
- handle = None
- suffix = tag
- prefixes = sorted(self.tag_prefixes.keys())
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == '!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.~*\'()[]' \
- or (ch == '!' and handle != '!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = ''.join(chunks)
- if handle:
- return '%s%s' % (handle, suffix_text)
- else:
- return '!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch, anchor))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith('---') or scalar.startswith('...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceeded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in '#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in '?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar as well.
- if ch in ',?[]{}':
- flow_indicators = True
- if ch == ':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '#' and preceeded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in '\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
- if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == ' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in '\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write('\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = ' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = ' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = '%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = '%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator('\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != ' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == '\'':
- data = '\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
- self.write_indicator('\'', False)
-
- ESCAPE_REPLACEMENTS = {
- '\0': '0',
- '\x07': 'a',
- '\x08': 'b',
- '\x09': 't',
- '\x0A': 'n',
- '\x0B': 'v',
- '\x0C': 'f',
- '\x0D': 'r',
- '\x1B': 'e',
- '\"': '\"',
- '\\': '\\',
- '\x85': 'N',
- '\xA0': '_',
- '\u2028': 'L',
- '\u2029': 'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator('"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
- or not ('\x20' <= ch <= '\x7E'
- or (self.allow_unicode
- and ('\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= '\xFF':
- data = '\\x%02X' % ord(ch)
- elif ch <= '\uFFFF':
- data = '\\u%04X' % ord(ch)
- else:
- data = '\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == ' ':
- data = '\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator('"', False)
-
- def determine_block_hints(self, text):
- hints = ''
- if text:
- if text[0] in ' \n\x85\u2028\u2029':
- hints += str(self.best_indent)
- if text[-1] not in '\n\x85\u2028\u2029':
- hints += '-'
- elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
- hints += '+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('>'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != ' ' \
- and text[start] == '\n':
- self.write_line_break()
- leading_space = (ch == ' ')
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- spaces = (ch == ' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('|'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in '\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = ' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
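
The Emitter consumes the event grammar listed at the top of this file; each emit() call appends one event and the state machine writes output as soon as it has enough look-ahead. A sketch that drives it through yaml.emit() with a hand-built event sequence (the key and value are illustrative):

    import yaml

    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(explicit=True),
        yaml.MappingStartEvent(anchor=None, tag=None, implicit=True),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='greeting'),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='hello'),
        yaml.MappingEndEvent(),
        yaml.DocumentEndEvent(explicit=False),
        yaml.StreamEndEvent(),
    ]
    # Prints an explicit document ("---") followed by the block mapping "greeting: hello".
    print(yaml.emit(events))
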
diff --git a/python.d/python_modules/pyyaml3/error.py b/python.d/python_modules/pyyaml3/error.py
deleted file mode 100644
index b796b4dc5..000000000
--- a/python.d/python_modules/pyyaml3/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end]
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
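
Mark carries the position data that get_snippet() turns into the caret diagram, and MarkedYAMLError is the base of every scanner, parser, and constructor error. A quick sketch of how those marks surface to callers:

    import yaml

    try:
        yaml.safe_load("listen: [19999")          # unterminated flow sequence
    except yaml.MarkedYAMLError as exc:
        mark = exc.problem_mark                   # a Mark instance
        print(exc.problem)
        print("line %d, column %d" % (mark.line + 1, mark.column + 1))
        print(exc)                                # full message, including get_snippet() output
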
diff --git a/python.d/python_modules/pyyaml3/events.py b/python.d/python_modules/pyyaml3/events.py
deleted file mode 100644
index f79ad389c..000000000
--- a/python.d/python_modules/pyyaml3/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
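
These event classes are the common currency between the parser and the emitter above, which makes an event-level round trip a short exercise; a small sketch (the input text is illustrative):

    import yaml

    text = "jobs:\n  - apache\n  - nginx\n"
    events = list(yaml.parse(text))   # Scanner/Parser -> Event objects
    for event in events:
        print(event)                  # uses the __repr__ defined on Event
    print(yaml.emit(events))          # Event objects -> Emitter -> text again
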
diff --git a/python.d/python_modules/pyyaml3/loader.py b/python.d/python_modules/pyyaml3/loader.py
deleted file mode 100644
index 08c8f01b3..000000000
--- a/python.d/python_modules/pyyaml3/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
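
Each loader is only a mixin stack, so customizing one usually means subclassing SafeLoader and registering an extra constructor rather than editing these classes. A sketch with a hypothetical !env tag (the tag name and helper are illustrative):

    import os
    import yaml

    class EnvLoader(yaml.SafeLoader):
        """SafeLoader plus one extra tag; everything else is inherited unchanged."""

    def expand_env(loader, node):
        # Resolve a scalar such as `!env HOME` against the process environment.
        return os.environ.get(loader.construct_scalar(node), '')

    EnvLoader.add_constructor('!env', expand_env)

    settings = yaml.load("home: !env HOME\n", Loader=EnvLoader)
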
diff --git a/python.d/python_modules/pyyaml3/nodes.py b/python.d/python_modules/pyyaml3/nodes.py
deleted file mode 100644
index c4f070c41..000000000
--- a/python.d/python_modules/pyyaml3/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
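
Nodes are the composer's output: every node keeps its resolved tag plus start/end marks, which is what yaml.compose exposes when you want the tree without constructing Python objects. A minimal sketch:

    import yaml

    root = yaml.compose("name: netdata\nport: 19999\n")
    print(type(root).__name__, root.tag)    # MappingNode tag:yaml.org,2002:map
    for key_node, value_node in root.value:
        print(key_node.value, '->', value_node.value, value_node.tag)
    # name -> netdata tag:yaml.org,2002:str
    # port -> 19999 tag:yaml.org,2002:int
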
diff --git a/python.d/python_modules/pyyaml3/parser.py b/python.d/python_modules/pyyaml3/parser.py
deleted file mode 100644
index 13a5995d2..000000000
--- a/python.d/python_modules/pyyaml3/parser.py
+++ /dev/null
@@ -1,589 +0,0 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser:
-    # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- '!': '!',
- '!!': 'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == 'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == 'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle,
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle,
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == '!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == '!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == '!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), '',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
-    # block_mapping ::= BLOCK-MAPPING-START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), '', mark, mark)
-
diff --git a/python.d/python_modules/pyyaml3/reader.py b/python.d/python_modules/pyyaml3/reader.py
deleted file mode 100644
index f70e920f4..000000000
--- a/python.d/python_modules/pyyaml3/reader.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further, there is no pretty code.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - move the current position forward by `length` characters.
-# reader.index - the index of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from .error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, bytes):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to a unicode string,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `bytes` object,
- # - a `str` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = ''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, str):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+'\0'
- elif isinstance(stream, bytes):
- self.name = "<byte string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = None
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in '\n\x85\u2028\u2029' \
- or (ch == '\r' and self.buffer[self.pointer] != '\n'):
- self.line += 1
- self.column = 0
- elif ch != '\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
- self.update_raw()
- if isinstance(self.raw_buffer, bytes):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError as exc:
- character = self.raw_buffer[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += '\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=4096):
- data = self.stream.read(size)
- if self.raw_buffer is None:
- self.raw_buffer = data
- else:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- if not data:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
diff --git a/python.d/python_modules/pyyaml3/representer.py b/python.d/python_modules/pyyaml3/representer.py
deleted file mode 100644
index 67cd6fd25..000000000
--- a/python.d/python_modules/pyyaml3/representer.py
+++ /dev/null
@@ -1,374 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter:
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, str(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- @classmethod
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
-
- @classmethod
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = list(mapping.items())
- try:
- mapping = sorted(mapping)
- except TypeError:
- pass
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, bytes, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
- def represent_str(self, data):
- return self.represent_scalar('tag:yaml.org,2002:str', data)
-
- def represent_binary(self, data):
- if hasattr(base64, 'encodebytes'):
- data = base64.encodebytes(data).decode('ascii')
- else:
- data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
- def represent_bool(self, data):
- if data:
- value = 'true'
- else:
- value = 'false'
- return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = '.nan'
- elif data == self.inf_value:
- value = '.inf'
- elif data == -self.inf_value:
- value = '-.inf'
- else:
- value = repr(data).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if '.' not in value and 'e' in value:
- value = value.replace('e', '.0e', 1)
- return self.represent_scalar('tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence('tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping('tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping('tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = data.isoformat()
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = data.isoformat(' ')
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = '%r' % data.real
- elif data.real == 0.0:
- data = '%rj' % data.imag
- elif data.imag > 0:
- data = '%r+%rj' % (data.real, data.imag)
- else:
- data = '%r%rj' % (data.real, data.imag)
- return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = '%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
- def represent_module(self, data):
- return self.represent_scalar(
- 'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copyreg.dispatch_table:
- reduce = copyreg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = 'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = 'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = '%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- 'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
diff --git a/python.d/python_modules/pyyaml3/resolver.py b/python.d/python_modules/pyyaml3/resolver.py
deleted file mode 100644
index 0eece2582..000000000
--- a/python.d/python_modules/pyyaml3/resolver.py
+++ /dev/null
@@ -1,224 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver:
-
- DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- @classmethod
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
- @classmethod
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
-        # a mapping value that corresponds to a scalar key whose content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, str) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (str, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, str):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, str):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == '':
- resolvers = self.yaml_implicit_resolvers.get('', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:bool',
- re.compile(r'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:float',
- re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:int',
- re.compile(r'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:merge',
- re.compile(r'^(?:<<)$'),
- ['<'])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:null',
- re.compile(r'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:timestamp',
- re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list('0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:value',
- re.compile(r'^(?:=)$'),
- ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:yaml',
- re.compile(r'^(?:!|&|\*)$'),
- list('!&*'))
-
diff --git a/python.d/python_modules/pyyaml3/scanner.py b/python.d/python_modules/pyyaml3/scanner.py
deleted file mode 100644
index 494d975ba..000000000
--- a/python.d/python_modules/pyyaml3/scanner.py
+++ /dev/null
@@ -1,1448 +0,0 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey:
-    # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner:
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
-        # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
-        # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
-        # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == '\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == '%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == '-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == '.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == '\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == '[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == '{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == ']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == '}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == ',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == '-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == '?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == ':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == '*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == '&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == '!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == '|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == '>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == '\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == '\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token" % ch,
- self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in list(self.possible_simple_keys):
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
-        # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                    "could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid intendation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
-        # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
-        # Reset simple keys. Note that there cannot be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '---' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '...' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
- and (ch == '-' or (not self.flow_level and ch in '?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == '\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == 'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == 'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r" % self.peek(),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not ('0' <= ch <= '9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch, self.get_mark())
- length = 0
- while '0' <= self.peek(length) <= '9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == ' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != ' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch, self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
-        # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == '*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == '<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != '>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- elif ch in '\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = '!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in '\0 \r\n\x85\u2028\u2029':
- if ch == '!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = '!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = '!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = ''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != '\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in ' \t'
- length = 0
- while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != '\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == '\n' \
- and leading_non_space and self.peek() not in ' \t':
- if not breaks:
- chunks.append(' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == '\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch, self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r" % ch,
- self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() != ' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- while self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
-        # Note that we loosen indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark the beginning and the end of them. Therefore we are less
-        # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- '0': '\0',
- 'a': '\x07',
- 'b': '\x08',
- 't': '\x09',
- '\t': '\x09',
- 'n': '\x0A',
- 'v': '\x0B',
- 'f': '\x0C',
- 'r': '\x0D',
- 'e': '\x1B',
- ' ': '\x20',
- '\"': '\"',
- '\\': '\\',
- 'N': '\x85',
- '_': '\xA0',
- 'L': '\u2028',
- 'P': '\u2029',
- }
-
- ESCAPE_CODES = {
- 'x': 2,
- 'u': 4,
- 'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == '\'' and self.peek(1) == '\'':
- chunks.append('\'')
- self.forward(2)
- elif (double and ch == '\'') or (not double and ch in '\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == '\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k)), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(chr(code))
- self.forward(length)
- elif ch in '\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch, self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in ' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == '\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in ' \t':
- self.forward()
- if self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == '#':
- break
- while True:
- ch = self.peek(length)
- if ch in '\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == ':' and
- self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in ',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == ':'
- and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == '#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in ' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != '!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != ' ':
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if ch != '!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.!~*\'()[]%':
- if ch == '%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch, self.get_mark())
- return ''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- codes = []
- mark = self.get_mark()
- while self.peek() == '%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
- % self.peek(k), self.get_mark())
- codes.append(int(self.prefix(2), 16))
- self.forward(2)
- try:
- value = bytes(codes).decode('utf-8')
- except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
-        #   '\u2029'    :   '\u2029'
- # default : ''
- ch = self.peek()
- if ch in '\r\n\x85':
- if self.prefix(2) == '\r\n':
- self.forward(2)
- else:
- self.forward()
- return '\n'
- elif ch in '\u2028\u2029':
- self.forward()
- return ch
- return ''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
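
The Scanner deleted above is the tokenizing stage of this vendored PyYAML copy: it turns a character stream into the KEY, VALUE, SCALAR and flow/block tokens that the parser consumes. A minimal sketch of what it emits, assuming the regular PyYAML package is installed in place of the vendored module:

    import yaml

    # yaml.scan() drives a Scanner like the one removed above and yields the
    # token objects (StreamStartToken, BlockMappingStartToken, KeyToken,
    # ValueToken, ScalarToken, ...) that feed the parser.
    for token in yaml.scan("host: localhost\nports: [19999, 8125]\n"):
        print(type(token).__name__, getattr(token, "value", ""))
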
diff --git a/python.d/python_modules/pyyaml3/serializer.py b/python.d/python_modules/pyyaml3/serializer.py
deleted file mode 100644
index fe911e67a..000000000
--- a/python.d/python_modules/pyyaml3/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer:
-
- ANCHOR_TEMPLATE = 'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
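
The Serializer deleted above walks a composed node graph, gives an &idNNN anchor (ANCHOR_TEMPLATE) to every node that is referenced more than once, and replays the graph as events for the emitter. A small round-trip sketch, again assuming the regular PyYAML package rather than this vendored copy:

    import yaml

    # Composing resolves the *d alias to the same node object, so the
    # Serializer's anchor pass emits that node once with a generated
    # &id001 anchor and refers to it with a *id001 alias afterwards.
    root = yaml.compose("defaults: &d {retries: 3}\njob: *d\n")
    print(yaml.serialize(root))
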
diff --git a/python.d/python_modules/pyyaml3/tokens.py b/python.d/python_modules/pyyaml3/tokens.py
deleted file mode 100644
index 4d0b48a39..000000000
--- a/python.d/python_modules/pyyaml3/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
diff --git a/python.d/python_modules/third_party/__init__.py b/python.d/python_modules/third_party/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/third_party/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/third_party/lm_sensors.py b/python.d/python_modules/third_party/lm_sensors.py
deleted file mode 100644
index 1d868f0e2..000000000
--- a/python.d/python_modules/third_party/lm_sensors.py
+++ /dev/null
@@ -1,257 +0,0 @@
-"""
-@package sensors.py
-Python Bindings for libsensors3
-
-use the documentation of libsensors for the low level API.
-see example.py for high level API usage.
-
-@author: Pavel Rojtberg (http://www.rojtberg.net)
-@see: https://github.com/paroj/sensors.py
-@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
-"""
-
-from ctypes import *
-import ctypes.util
-
-_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
-# see https://github.com/paroj/sensors.py/issues/1
-_libc.free.argtypes = [c_void_p]
-_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
-
-version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
-
-
-class bus_id(Structure):
- _fields_ = [("type", c_short),
- ("nr", c_short)]
-
-
-class chip_name(Structure):
- _fields_ = [("prefix", c_char_p),
- ("bus", bus_id),
- ("addr", c_int),
- ("path", c_char_p)]
-
-
-class feature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int)]
-
- # sensors_feature_type
- IN = 0x00
- FAN = 0x01
- TEMP = 0x02
- POWER = 0x03
- ENERGY = 0x04
- CURR = 0x05
- HUMIDITY = 0x06
- MAX_MAIN = 0x7
- VID = 0x10
- INTRUSION = 0x11
- MAX_OTHER = 0x12
- BEEP_ENABLE = 0x18
-
-
-class subfeature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int),
- ("mapping", c_int),
- ("flags", c_uint)]
-
-
-_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
-_hdl.sensors_get_features.restype = POINTER(feature)
-_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
-_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
-_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
-_hdl.sensors_strerror.restype = c_char_p
-
-### RAW API ###
-MODE_R = 1
-MODE_W = 2
-COMPUTE_MAPPING = 4
-
-
-def init(cfg_file=None):
- file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
-
- if _hdl.sensors_init(file) != 0:
- raise Exception("sensors_init failed")
-
- if file is not None:
- _libc.fclose(file)
-
-
-def cleanup():
- _hdl.sensors_cleanup()
-
-
-def parse_chip_name(orig_name):
- ret = chip_name()
- err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
-
- if err < 0:
- raise Exception(strerror(err))
-
- return ret
-
-
-def strerror(errnum):
- return _hdl.sensors_strerror(errnum).decode("utf-8")
-
-
-def free_chip_name(chip):
- _hdl.sensors_free_chip_name(byref(chip))
-
-
-def get_detected_chips(match, nr):
- """
- @return: (chip, next nr to query)
- """
- _nr = c_int(nr)
-
- if match is not None:
- match = byref(match)
-
- chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
- chip = chip.contents if bool(chip) else None
- return chip, _nr.value
-
-
-def chip_snprintf_name(chip, buffer_size=200):
- """
- @param buffer_size defaults to the size used in the sensors utility
- """
- ret = create_string_buffer(buffer_size)
- err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
-
- if err < 0:
- raise Exception(strerror(err))
-
- return ret.value.decode("utf-8")
-
-
-def do_chip_sets(chip):
- """
- @attention this function was not tested
- """
- err = _hdl.sensors_do_chip_sets(byref(chip))
- if err < 0:
- raise Exception(strerror(err))
-
-
-def get_adapter_name(bus):
- return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
-
-
-def get_features(chip, nr):
- """
- @return: (feature, next nr to query)
- """
- _nr = c_int(nr)
- feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
- feature = feature.contents if bool(feature) else None
- return feature, _nr.value
-
-
-def get_label(chip, feature):
- ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
- val = cast(ptr, c_char_p).value.decode("utf-8")
- _libc.free(ptr)
- return val
-
-
-def get_all_subfeatures(chip, feature, nr):
- """
- @return: (subfeature, next nr to query)
- """
- _nr = c_int(nr)
- subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
- subfeature = subfeature.contents if bool(subfeature) else None
- return subfeature, _nr.value
-
-
-def get_value(chip, subfeature_nr):
- val = c_double()
- err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise Exception(strerror(err))
- return val.value
-
-
-def set_value(chip, subfeature_nr, value):
- """
- @attention this function was not tested
- """
- val = c_double(value)
- err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise Exception(strerror(err))
-
-
-### Convenience API ###
-class ChipIterator:
- def __init__(self, match=None):
- self.match = parse_chip_name(match) if match is not None else None
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- chip, self.nr = get_detected_chips(self.match, self.nr)
-
- if chip is None:
- raise StopIteration
-
- return chip
-
- def __del__(self):
- if self.match is not None:
- free_chip_name(self.match)
-
-    def next(self):  # python2 compatibility
- return self.__next__()
-
-
-class FeatureIterator:
- def __init__(self, chip):
- self.chip = chip
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- feature, self.nr = get_features(self.chip, self.nr)
-
- if feature is None:
- raise StopIteration
-
- return feature
-
-    def next(self):  # python2 compatibility
- return self.__next__()
-
-
-class SubFeatureIterator:
- def __init__(self, chip, feature):
- self.chip = chip
- self.feature = feature
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
-
- if subfeature is None:
- raise StopIteration
-
- return subfeature
-
-    def next(self):  # python2 compatibility
-        return self.__next__()
\ No newline at end of file
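
lm_sensors.py binds libsensors3 through ctypes; the iterator classes at the bottom are the "high level API" its docstring points at. A rough usage sketch, assuming libsensors is installed and the module is importable (the import path below is illustrative):

    from python_modules.third_party import lm_sensors as sensors  # illustrative path

    sensors.init()  # load the default libsensors configuration
    try:
        for chip in sensors.ChipIterator():
            print(sensors.chip_snprintf_name(chip))
            for feature in sensors.FeatureIterator(chip):
                label = sensors.get_label(chip, feature)
                for sub in sensors.SubFeatureIterator(chip, feature):
                    value = sensors.get_value(chip, sub.number)
                    print("  %s (%s) = %s" % (label, sub.name.decode("utf-8"), value))
    finally:
        sensors.cleanup()
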
diff --git a/python.d/python_modules/third_party/ordereddict.py b/python.d/python_modules/third_party/ordereddict.py
deleted file mode 100644
index d0b97d47c..000000000
--- a/python.d/python_modules/third_party/ordereddict.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-
-class OrderedDict(dict, DictMixin):
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__end
- except AttributeError:
- self.clear()
- self.update(*args, **kwds)
-
- def clear(self):
- self.__end = end = []
- end += [None, end, end] # sentinel node for doubly linked list
- self.__map = {} # key --> [key, prev, next]
- dict.clear(self)
-
- def __setitem__(self, key, value):
- if key not in self:
- end = self.__end
- curr = end[1]
- curr[2] = end[1] = self.__map[key] = [key, curr, end]
- dict.__setitem__(self, key, value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- key, prev, next = self.__map.pop(key)
- prev[2] = next
- next[1] = prev
-
- def __iter__(self):
- end = self.__end
- curr = end[2]
- while curr is not end:
- yield curr[0]
- curr = curr[2]
-
- def __reversed__(self):
- end = self.__end
- curr = end[1]
- while curr is not end:
- yield curr[0]
- curr = curr[1]
-
- def popitem(self, last=True):
- if not self:
- raise KeyError('dictionary is empty')
- if last:
- key = reversed(self).next()
- else:
- key = iter(self).next()
- value = self.pop(key)
- return key, value
-
- def __reduce__(self):
- items = [[k, self[k]] for k in self]
- tmp = self.__map, self.__end
- del self.__map, self.__end
- inst_dict = vars(self).copy()
- self.__map, self.__end = tmp
- if inst_dict:
- return self.__class__, (items,), inst_dict
- return self.__class__, (items,)
-
- def keys(self):
- return list(self)
-
- setdefault = DictMixin.setdefault
- update = DictMixin.update
- pop = DictMixin.pop
- values = DictMixin.values
- items = DictMixin.items
- iterkeys = DictMixin.iterkeys
- itervalues = DictMixin.itervalues
- iteritems = DictMixin.iteritems
-
- def __repr__(self):
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
-
- def copy(self):
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- if isinstance(other, OrderedDict):
- if len(self) != len(other):
- return False
- for p, q in zip(self.items(), other.items()):
- if p != q:
- return False
- return True
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
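
ordereddict.py is Raymond Hettinger's recipe that backports insertion-ordered dictionaries to interpreters older than Python 2.7; on anything newer, collections.OrderedDict in the standard library provides the same API. A quick sketch of the semantics the collectors rely on:

    try:  # Python 2.7+ and 3.x
        from collections import OrderedDict
    except ImportError:  # older interpreters fall back to the bundled recipe
        from third_party.ordereddict import OrderedDict  # illustrative path

    d = OrderedDict()
    d["bytes_in"] = 10
    d["bytes_out"] = 20
    d["requests"] = 3
    print(list(d))                # keys come back in insertion order
    print(d.popitem(last=False))  # ('bytes_in', 10) - removed from the front
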
diff --git a/python.d/python_modules/urllib3/__init__.py b/python.d/python_modules/urllib3/__init__.py
deleted file mode 100644
index 26493ecb9..000000000
--- a/python.d/python_modules/urllib3/__init__.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-urllib3 - Thread-safe connection pooling and re-using.
-"""
-
-from __future__ import absolute_import
-import warnings
-
-from .connectionpool import (
- HTTPConnectionPool,
- HTTPSConnectionPool,
- connection_from_url
-)
-
-from . import exceptions
-from .filepost import encode_multipart_formdata
-from .poolmanager import PoolManager, ProxyManager, proxy_from_url
-from .response import HTTPResponse
-from .util.request import make_headers
-from .util.url import get_host
-from .util.timeout import Timeout
-from .util.retry import Retry
-
-
-# Set default logging handler to avoid "No handler found" warnings.
-import logging
-try: # Python 2.7+
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
-__license__ = 'MIT'
-__version__ = '1.21.1'
-
-__all__ = (
- 'HTTPConnectionPool',
- 'HTTPSConnectionPool',
- 'PoolManager',
- 'ProxyManager',
- 'HTTPResponse',
- 'Retry',
- 'Timeout',
- 'add_stderr_logger',
- 'connection_from_url',
- 'disable_warnings',
- 'encode_multipart_formdata',
- 'get_host',
- 'make_headers',
- 'proxy_from_url',
-)
-
-logging.getLogger(__name__).addHandler(NullHandler())
-
-
-def add_stderr_logger(level=logging.DEBUG):
- """
- Helper for quickly adding a StreamHandler to the logger. Useful for
- debugging.
-
- Returns the handler after adding it.
- """
- # This method needs to be in this __init__.py to get the __name__ correct
- # even if urllib3 is vendored within another package.
- logger = logging.getLogger(__name__)
- handler = logging.StreamHandler()
- handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
- logger.addHandler(handler)
- logger.setLevel(level)
- logger.debug('Added a stderr logging handler to logger: %s', __name__)
- return handler
-
-
-# ... Clean up.
-del NullHandler
-
-
-# All warning filters *must* be appended unless you're really certain that they
-# shouldn't be: otherwise, it's very hard for users to use most Python
-# mechanisms to silence them.
-# SecurityWarning's always go off by default.
-warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
-# SubjectAltNameWarning's should go off once per host
-warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
-# InsecurePlatformWarning's don't vary between requests, so we keep it default.
-warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
- append=True)
-# SNIMissingWarnings should go off only once.
-warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
-
-
-def disable_warnings(category=exceptions.HTTPWarning):
- """
- Helper for quickly disabling all urllib3 warnings.
- """
- warnings.simplefilter('ignore', category)
diff --git a/python.d/python_modules/urllib3/_collections.py b/python.d/python_modules/urllib3/_collections.py
deleted file mode 100644
index 4849ddecd..000000000
--- a/python.d/python_modules/urllib3/_collections.py
+++ /dev/null
@@ -1,314 +0,0 @@
-from __future__ import absolute_import
-from collections import Mapping, MutableMapping
-try:
- from threading import RLock
-except ImportError: # Platform-specific: No threads available
- class RLock:
- def __enter__(self):
- pass
-
- def __exit__(self, exc_type, exc_value, traceback):
- pass
-
-
-try: # Python 2.7+
- from collections import OrderedDict
-except ImportError:
- from .packages.ordered_dict import OrderedDict
-from .packages.six import iterkeys, itervalues, PY3
-
-
-__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
-
-
-_Null = object()
-
-
-class RecentlyUsedContainer(MutableMapping):
- """
- Provides a thread-safe dict-like container which maintains up to
- ``maxsize`` keys while throwing away the least-recently-used keys beyond
- ``maxsize``.
-
- :param maxsize:
- Maximum number of recent elements to retain.
-
- :param dispose_func:
- Every time an item is evicted from the container,
- ``dispose_func(value)`` is called. Callback which will get called
- """
-
- ContainerCls = OrderedDict
-
- def __init__(self, maxsize=10, dispose_func=None):
- self._maxsize = maxsize
- self.dispose_func = dispose_func
-
- self._container = self.ContainerCls()
- self.lock = RLock()
-
- def __getitem__(self, key):
- # Re-insert the item, moving it to the end of the eviction line.
- with self.lock:
- item = self._container.pop(key)
- self._container[key] = item
- return item
-
- def __setitem__(self, key, value):
- evicted_value = _Null
- with self.lock:
- # Possibly evict the existing value of 'key'
- evicted_value = self._container.get(key, _Null)
- self._container[key] = value
-
- # If we didn't evict an existing value, we might have to evict the
- # least recently used item from the beginning of the container.
- if len(self._container) > self._maxsize:
- _key, evicted_value = self._container.popitem(last=False)
-
- if self.dispose_func and evicted_value is not _Null:
- self.dispose_func(evicted_value)
-
- def __delitem__(self, key):
- with self.lock:
- value = self._container.pop(key)
-
- if self.dispose_func:
- self.dispose_func(value)
-
- def __len__(self):
- with self.lock:
- return len(self._container)
-
- def __iter__(self):
- raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
-
- def clear(self):
- with self.lock:
- # Copy pointers to all values, then wipe the mapping
- values = list(itervalues(self._container))
- self._container.clear()
-
- if self.dispose_func:
- for value in values:
- self.dispose_func(value)
-
- def keys(self):
- with self.lock:
- return list(iterkeys(self._container))
-
-
-class HTTPHeaderDict(MutableMapping):
- """
- :param headers:
- An iterable of field-value pairs. Must not contain multiple field names
- when compared case-insensitively.
-
- :param kwargs:
- Additional field-value pairs to pass in to ``dict.update``.
-
- A ``dict`` like container for storing HTTP Headers.
-
- Field names are stored and compared case-insensitively in compliance with
- RFC 7230. Iteration provides the first case-sensitive key seen for each
- case-insensitive pair.
-
- Using ``__setitem__`` syntax overwrites fields that compare equal
- case-insensitively in order to maintain ``dict``'s api. For fields that
- compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
- in a loop.
-
- If multiple fields that are equal case-insensitively are passed to the
- constructor or ``.update``, the behavior is undefined and some will be
- lost.
-
- >>> headers = HTTPHeaderDict()
- >>> headers.add('Set-Cookie', 'foo=bar')
- >>> headers.add('set-cookie', 'baz=quxx')
- >>> headers['content-length'] = '7'
- >>> headers['SET-cookie']
- 'foo=bar, baz=quxx'
- >>> headers['Content-Length']
- '7'
- """
-
- def __init__(self, headers=None, **kwargs):
- super(HTTPHeaderDict, self).__init__()
- self._container = OrderedDict()
- if headers is not None:
- if isinstance(headers, HTTPHeaderDict):
- self._copy_from(headers)
- else:
- self.extend(headers)
- if kwargs:
- self.extend(kwargs)
-
- def __setitem__(self, key, val):
- self._container[key.lower()] = [key, val]
- return self._container[key.lower()]
-
- def __getitem__(self, key):
- val = self._container[key.lower()]
- return ', '.join(val[1:])
-
- def __delitem__(self, key):
- del self._container[key.lower()]
-
- def __contains__(self, key):
- return key.lower() in self._container
-
- def __eq__(self, other):
- if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
- return False
- if not isinstance(other, type(self)):
- other = type(self)(other)
- return (dict((k.lower(), v) for k, v in self.itermerged()) ==
- dict((k.lower(), v) for k, v in other.itermerged()))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- if not PY3: # Python 2
- iterkeys = MutableMapping.iterkeys
- itervalues = MutableMapping.itervalues
-
- __marker = object()
-
- def __len__(self):
- return len(self._container)
-
- def __iter__(self):
- # Only provide the originally cased names
- for vals in self._container.values():
- yield vals[0]
-
- def pop(self, key, default=__marker):
- '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
- '''
- # Using the MutableMapping function directly fails due to the private marker.
- # Using ordinary dict.pop would expose the internal structures.
- # So let's reinvent the wheel.
- try:
- value = self[key]
- except KeyError:
- if default is self.__marker:
- raise
- return default
- else:
- del self[key]
- return value
-
- def discard(self, key):
- try:
- del self[key]
- except KeyError:
- pass
-
- def add(self, key, val):
- """Adds a (name, value) pair, doesn't overwrite the value if it already
- exists.
-
- >>> headers = HTTPHeaderDict(foo='bar')
- >>> headers.add('Foo', 'baz')
- >>> headers['foo']
- 'bar, baz'
- """
- key_lower = key.lower()
- new_vals = [key, val]
- # Keep the common case aka no item present as fast as possible
- vals = self._container.setdefault(key_lower, new_vals)
- if new_vals is not vals:
- vals.append(val)
-
- def extend(self, *args, **kwargs):
- """Generic import function for any type of header-like object.
- Adapted version of MutableMapping.update in order to insert items
- with self.add instead of self.__setitem__
- """
- if len(args) > 1:
- raise TypeError("extend() takes at most 1 positional "
- "arguments ({0} given)".format(len(args)))
- other = args[0] if len(args) >= 1 else ()
-
- if isinstance(other, HTTPHeaderDict):
- for key, val in other.iteritems():
- self.add(key, val)
- elif isinstance(other, Mapping):
- for key in other:
- self.add(key, other[key])
- elif hasattr(other, "keys"):
- for key in other.keys():
- self.add(key, other[key])
- else:
- for key, value in other:
- self.add(key, value)
-
- for key, value in kwargs.items():
- self.add(key, value)
-
- def getlist(self, key):
- """Returns a list of all the values for the named field. Returns an
- empty list if the key doesn't exist."""
- try:
- vals = self._container[key.lower()]
- except KeyError:
- return []
- else:
- return vals[1:]
-
- # Backwards compatibility for httplib
- getheaders = getlist
- getallmatchingheaders = getlist
- iget = getlist
-
- def __repr__(self):
- return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
-
- def _copy_from(self, other):
- for key in other:
- val = other.getlist(key)
- if isinstance(val, list):
- # Don't need to convert tuples
- val = list(val)
- self._container[key.lower()] = [key] + val
-
- def copy(self):
- clone = type(self)()
- clone._copy_from(self)
- return clone
-
- def iteritems(self):
- """Iterate over all header lines, including duplicate ones."""
- for key in self:
- vals = self._container[key.lower()]
- for val in vals[1:]:
- yield vals[0], val
-
- def itermerged(self):
- """Iterate over all headers, merging duplicate ones together."""
- for key in self:
- val = self._container[key.lower()]
- yield val[0], ', '.join(val[1:])
-
- def items(self):
- return list(self.iteritems())
-
- @classmethod
- def from_httplib(cls, message): # Python 2
- """Read headers from a Python 2 httplib message object."""
- # python2.7 does not expose a proper API for exporting multiheaders
- # efficiently. This function re-reads raw lines from the message
- # object and extracts the multiheaders properly.
- headers = []
-
- for line in message.headers:
- if line.startswith((' ', '\t')):
- key, value = headers[-1]
- headers[-1] = (key, value + '\r\n' + line.rstrip())
- continue
-
- key, value = line.split(':', 1)
- headers.append((key, value.strip()))
-
- return cls(headers)
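
HTTPHeaderDict keeps one entry per case-insensitive field name while remembering every value added to it, which is exactly what the docstring examples above show. The same behaviour, sketched against the regular urllib3 package:

    from urllib3._collections import HTTPHeaderDict

    headers = HTTPHeaderDict()
    headers.add("Set-Cookie", "foo=bar")   # add() appends to an existing field
    headers.add("set-cookie", "baz=quxx")  # field names compare case-insensitively
    headers["Content-Length"] = "7"        # __setitem__ overwrites instead

    print(headers["SET-cookie"])           # 'foo=bar, baz=quxx'
    print(headers.getlist("set-cookie"))   # ['foo=bar', 'baz=quxx']
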
diff --git a/python.d/python_modules/urllib3/connection.py b/python.d/python_modules/urllib3/connection.py
deleted file mode 100644
index c0d832998..000000000
--- a/python.d/python_modules/urllib3/connection.py
+++ /dev/null
@@ -1,373 +0,0 @@
-from __future__ import absolute_import
-import datetime
-import logging
-import os
-import sys
-import socket
-from socket import error as SocketError, timeout as SocketTimeout
-import warnings
-from .packages import six
-from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
-from .packages.six.moves.http_client import HTTPException # noqa: F401
-
-try: # Compiled with SSL?
- import ssl
- BaseSSLError = ssl.SSLError
-except (ImportError, AttributeError): # Platform-specific: No SSL.
- ssl = None
-
- class BaseSSLError(BaseException):
- pass
-
-
-try: # Python 3:
- # Not a no-op, we're adding this to the namespace so it can be imported.
- ConnectionError = ConnectionError
-except NameError: # Python 2:
- class ConnectionError(Exception):
- pass
-
-
-from .exceptions import (
- NewConnectionError,
- ConnectTimeoutError,
- SubjectAltNameWarning,
- SystemTimeWarning,
-)
-from .packages.ssl_match_hostname import match_hostname, CertificateError
-
-from .util.ssl_ import (
- resolve_cert_reqs,
- resolve_ssl_version,
- assert_fingerprint,
- create_urllib3_context,
- ssl_wrap_socket
-)
-
-
-from .util import connection
-
-from ._collections import HTTPHeaderDict
-
-log = logging.getLogger(__name__)
-
-port_by_scheme = {
- 'http': 80,
- 'https': 443,
-}
-
-# When updating RECENT_DATE, move it to
-# within two years of the current date, and no
-# earlier than 6 months ago.
-RECENT_DATE = datetime.date(2016, 1, 1)
-
-
-class DummyConnection(object):
- """Used to detect a failed ConnectionCls import."""
- pass
-
-
-class HTTPConnection(_HTTPConnection, object):
- """
- Based on httplib.HTTPConnection but provides an extra constructor
- backwards-compatibility layer between older and newer Pythons.
-
- Additional keyword parameters are used to configure attributes of the connection.
- Accepted parameters include:
-
- - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- - ``source_address``: Set the source address for the current connection.
-
- .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
-
- - ``socket_options``: Set specific options on the underlying socket. If not specified, then
- defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
- Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
-
- For example, if you wish to enable TCP Keep Alive in addition to the defaults,
- you might pass::
-
- HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ]
-
- Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
- """
-
- default_port = port_by_scheme['http']
-
- #: Disable Nagle's algorithm by default.
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
- default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
-
- #: Whether this connection verifies the host's certificate.
- is_verified = False
-
- def __init__(self, *args, **kw):
- if six.PY3: # Python 3
- kw.pop('strict', None)
-
- # Pre-set source_address in case we have an older Python like 2.6.
- self.source_address = kw.get('source_address')
-
- if sys.version_info < (2, 7): # Python 2.6
- # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
- # not newer versions. We can still use it when creating a
- # connection though, so we pop it *after* we have saved it as
- # self.source_address.
- kw.pop('source_address', None)
-
- #: The socket options provided by the user. If no options are
- #: provided, we use the default options.
- self.socket_options = kw.pop('socket_options', self.default_socket_options)
-
- # Superclass also sets self.source_address in Python 2.7+.
- _HTTPConnection.__init__(self, *args, **kw)
-
- def _new_conn(self):
- """ Establish a socket connection and set nodelay settings on it.
-
- :return: New socket connection.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = connection.create_connection(
- (self.host, self.port), self.timeout, **extra_kw)
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except SocketError as e:
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
- def _prepare_conn(self, conn):
- self.sock = conn
- # the _tunnel_host attribute was added in python 2.6.3 (via
-        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
-        # not have it.
- if getattr(self, '_tunnel_host', None):
- # TODO: Fix tunnel so it doesn't depend on self.sock state.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- def request_chunked(self, method, url, body=None, headers=None):
- """
- Alternative to the common request method, which sends the
- body with chunked encoding and not as one block
- """
- headers = HTTPHeaderDict(headers if headers is not None else {})
- skip_accept_encoding = 'accept-encoding' in headers
- skip_host = 'host' in headers
- self.putrequest(
- method,
- url,
- skip_accept_encoding=skip_accept_encoding,
- skip_host=skip_host
- )
- for header, value in headers.items():
- self.putheader(header, value)
- if 'transfer-encoding' not in headers:
- self.putheader('Transfer-Encoding', 'chunked')
- self.endheaders()
-
- if body is not None:
- stringish_types = six.string_types + (six.binary_type,)
- if isinstance(body, stringish_types):
- body = (body,)
- for chunk in body:
- if not chunk:
- continue
- if not isinstance(chunk, six.binary_type):
- chunk = chunk.encode('utf8')
- len_str = hex(len(chunk))[2:]
- self.send(len_str.encode('utf-8'))
- self.send(b'\r\n')
- self.send(chunk)
- self.send(b'\r\n')
-
- # After the if clause, to always have a closed body
- self.send(b'0\r\n\r\n')
-
-
-class HTTPSConnection(HTTPConnection):
- default_port = port_by_scheme['https']
-
- ssl_version = None
-
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- ssl_context=None, **kw):
-
- HTTPConnection.__init__(self, host, port, strict=strict,
- timeout=timeout, **kw)
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.ssl_context = ssl_context
-
- # Required property for Google AppEngine 1.9.0 which otherwise causes
- # HTTPS requests to go out as HTTP. (See Issue #356)
- self._protocol = 'https'
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(None),
- cert_reqs=resolve_cert_reqs(None),
- )
-
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ssl_context=self.ssl_context,
- )
-
-
-class VerifiedHTTPSConnection(HTTPSConnection):
- """
- Based on httplib.HTTPSConnection but wraps the socket with
- SSL certification.
- """
- cert_reqs = None
- ca_certs = None
- ca_cert_dir = None
- ssl_version = None
- assert_fingerprint = None
-
- def set_cert(self, key_file=None, cert_file=None,
- cert_reqs=None, ca_certs=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None):
- """
- This method should only be called once, before the connection is used.
- """
- # If cert_reqs is not provided, we can try to guess. If the user gave
- # us a cert database, we assume they want to use it: otherwise, if
- # they gave us an SSL Context object we should use whatever is set for
- # it.
- if cert_reqs is None:
- if ca_certs or ca_cert_dir:
- cert_reqs = 'CERT_REQUIRED'
- elif self.ssl_context is not None:
- cert_reqs = self.ssl_context.verify_mode
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
- self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
- self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
-
- def connect(self):
- # Add certificate verification
- conn = self._new_conn()
-
- hostname = self.host
- if getattr(self, '_tunnel_host', None):
- # _tunnel_host was added in Python 2.6.3
- # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
-
- self.sock = conn
- # Calls self._set_hostport(), so self.host is
- # self._tunnel_host below.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- # Override the host with the one we're requesting data from.
- hostname = self._tunnel_host
-
- is_time_off = datetime.date.today() < RECENT_DATE
- if is_time_off:
- warnings.warn((
- 'System time is way off (before {0}). This will probably '
- 'lead to SSL verification errors').format(RECENT_DATE),
- SystemTimeWarning
- )
-
- # Wrap socket using verification with the root certs in
- # trusted_root_certs
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(self.ssl_version),
- cert_reqs=resolve_cert_reqs(self.cert_reqs),
- )
-
- context = self.ssl_context
- context.verify_mode = resolve_cert_reqs(self.cert_reqs)
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- server_hostname=hostname,
- ssl_context=context)
-
- if self.assert_fingerprint:
- assert_fingerprint(self.sock.getpeercert(binary_form=True),
- self.assert_fingerprint)
- elif context.verify_mode != ssl.CERT_NONE \
- and not getattr(context, 'check_hostname', False) \
- and self.assert_hostname is not False:
- # While urllib3 attempts to always turn off hostname matching from
- # the TLS library, this cannot always be done. So we check whether
- # the TLS Library still thinks it's matching hostnames.
- cert = self.sock.getpeercert()
- if not cert.get('subjectAltName', ()):
- warnings.warn((
- 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
- '`commonName` for now. This feature is being removed by major browsers and '
- 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
- 'for details.)'.format(hostname)),
- SubjectAltNameWarning
- )
- _match_hostname(cert, self.assert_hostname or hostname)
-
- self.is_verified = (
- context.verify_mode == ssl.CERT_REQUIRED or
- self.assert_fingerprint is not None
- )
-
-
-def _match_hostname(cert, asserted_hostname):
- try:
- match_hostname(cert, asserted_hostname)
- except CertificateError as e:
- log.error(
- 'Certificate did not match expected hostname: %s. '
- 'Certificate: %s', asserted_hostname, cert
- )
- # Add cert to exception and reraise so client code can inspect
- # the cert when catching the exception, if they want to
- e._peer_cert = cert
- raise
-
-
-if ssl:
- # Make a copy for testing.
- UnverifiedHTTPSConnection = HTTPSConnection
- HTTPSConnection = VerifiedHTTPSConnection
-else:
- HTTPSConnection = DummyConnection
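The connection classes deleted above are normally reached through urllib3's pooling layer rather than instantiated directly. A minimal sketch of the ``socket_options`` behaviour documented in the HTTPConnection docstring, assuming the standalone urllib3 package is installed and using example.com as a placeholder host::

    import socket
    import urllib3
    from urllib3.connection import HTTPConnection

    # Keep the defaults (TCP_NODELAY, i.e. Nagle disabled) and additionally
    # enable TCP keep-alive, as the docstring above suggests.
    options = HTTPConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]

    # socket_options is forwarded through the pool to every new connection.
    http = urllib3.PoolManager(socket_options=options)
    response = http.request('GET', 'http://example.com/')
    print(response.status)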
diff --git a/python.d/python_modules/urllib3/connectionpool.py b/python.d/python_modules/urllib3/connectionpool.py
deleted file mode 100644
index b4f1166a6..000000000
--- a/python.d/python_modules/urllib3/connectionpool.py
+++ /dev/null
@@ -1,899 +0,0 @@
-from __future__ import absolute_import
-import errno
-import logging
-import sys
-import warnings
-
-from socket import error as SocketError, timeout as SocketTimeout
-import socket
-
-
-from .exceptions import (
- ClosedPoolError,
- ProtocolError,
- EmptyPoolError,
- HeaderParsingError,
- HostChangedError,
- LocationValueError,
- MaxRetryError,
- ProxyError,
- ReadTimeoutError,
- SSLError,
- TimeoutError,
- InsecureRequestWarning,
- NewConnectionError,
-)
-from .packages.ssl_match_hostname import CertificateError
-from .packages import six
-from .packages.six.moves import queue
-from .connection import (
- port_by_scheme,
- DummyConnection,
- HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
- HTTPException, BaseSSLError,
-)
-from .request import RequestMethods
-from .response import HTTPResponse
-
-from .util.connection import is_connection_dropped
-from .util.request import set_file_position
-from .util.response import assert_header_parsing
-from .util.retry import Retry
-from .util.timeout import Timeout
-from .util.url import get_host, Url
-
-
-if six.PY2:
- # Queue is imported for side effects on MS Windows
- import Queue as _unused_module_Queue # noqa: F401
-
-xrange = six.moves.xrange
-
-log = logging.getLogger(__name__)
-
-_Default = object()
-
-
-# Pool objects
-class ConnectionPool(object):
- """
- Base class for all connection pools, such as
- :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
- """
-
- scheme = None
- QueueCls = queue.LifoQueue
-
- def __init__(self, host, port=None):
- if not host:
- raise LocationValueError("No host specified.")
-
- self.host = _ipv6_host(host).lower()
- self.port = port
-
- def __str__(self):
- return '%s(host=%r, port=%r)' % (type(self).__name__,
- self.host, self.port)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.close()
- # Return False to re-raise any potential exceptions
- return False
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- pass
-
-
-# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
-_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
-
-
-class HTTPConnectionPool(ConnectionPool, RequestMethods):
- """
- Thread-safe connection pool for one host.
-
- :param host:
- Host used for this HTTP Connection (e.g. "localhost"), passed into
- :class:`httplib.HTTPConnection`.
-
- :param port:
- Port used for this HTTP Connection (None is equivalent to 80), passed
- into :class:`httplib.HTTPConnection`.
-
- :param strict:
- Causes BadStatusLine to be raised if the status line can't be parsed
- as a valid HTTP/1.0 or 1.1 status line, passed into
- :class:`httplib.HTTPConnection`.
-
- .. note::
- Only works in Python 2. This parameter is ignored in Python 3.
-
- :param timeout:
- Socket timeout in seconds for each individual connection. This can
- be a float or integer, which sets the timeout for the HTTP request,
- or an instance of :class:`urllib3.util.Timeout` which gives you more
- fine-grained control over request timeouts. After the constructor has
- been parsed, this is always a `urllib3.util.Timeout` object.
-
- :param maxsize:
- Number of connections to save that can be reused. More than 1 is useful
- in multithreaded situations. If ``block`` is set to False, more
- connections will be created but they will not be saved once they've
- been used.
-
- :param block:
- If set to True, no more than ``maxsize`` connections will be used at
- a time. When no free connections are available, the call will block
- until a connection has been released. This is a useful side effect for
- particular multithreaded situations where one does not want to use more
- than maxsize connections per host to prevent flooding.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param retries:
- Retry configuration to use by default with requests in this pool.
-
- :param _proxy:
- Parsed proxy URL, should not be used directly, instead, see
- :class:`urllib3.connectionpool.ProxyManager`"
-
- :param _proxy_headers:
- A dictionary with proxy headers, should not be used directly,
- instead, see :class:`urllib3.connectionpool.ProxyManager`"
-
- :param \\**conn_kw:
- Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
- :class:`urllib3.connection.HTTPSConnection` instances.
- """
-
- scheme = 'http'
- ConnectionCls = HTTPConnection
- ResponseCls = HTTPResponse
-
- def __init__(self, host, port=None, strict=False,
- timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
- headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- **conn_kw):
- ConnectionPool.__init__(self, host, port)
- RequestMethods.__init__(self, headers)
-
- self.strict = strict
-
- if not isinstance(timeout, Timeout):
- timeout = Timeout.from_float(timeout)
-
- if retries is None:
- retries = Retry.DEFAULT
-
- self.timeout = timeout
- self.retries = retries
-
- self.pool = self.QueueCls(maxsize)
- self.block = block
-
- self.proxy = _proxy
- self.proxy_headers = _proxy_headers or {}
-
- # Fill the queue up so that doing get() on it will block properly
- for _ in xrange(maxsize):
- self.pool.put(None)
-
- # These are mostly for testing and debugging purposes.
- self.num_connections = 0
- self.num_requests = 0
- self.conn_kw = conn_kw
-
- if self.proxy:
- # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
- # We cannot know if the user has added default socket options, so we cannot replace the
- # list.
- self.conn_kw.setdefault('socket_options', [])
-
- def _new_conn(self):
- """
- Return a fresh :class:`HTTPConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTP connection (%d): %s",
- self.num_connections, self.host)
-
- conn = self.ConnectionCls(host=self.host, port=self.port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
- return conn
-
- def _get_conn(self, timeout=None):
- """
- Get a connection. Will return a pooled connection if one is available.
-
- If no connections are available and :prop:`.block` is ``False``, then a
- fresh connection is returned.
-
- :param timeout:
- Seconds to wait before giving up and raising
- :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
- :prop:`.block` is ``True``.
- """
- conn = None
- try:
- conn = self.pool.get(block=self.block, timeout=timeout)
-
- except AttributeError: # self.pool is None
- raise ClosedPoolError(self, "Pool is closed.")
-
- except queue.Empty:
- if self.block:
- raise EmptyPoolError(self,
- "Pool reached maximum size and no more "
- "connections are allowed.")
- pass # Oh well, we'll create a new connection then
-
- # If this is a persistent connection, check if it got disconnected
- if conn and is_connection_dropped(conn):
- log.debug("Resetting dropped connection: %s", self.host)
- conn.close()
- if getattr(conn, 'auto_open', 1) == 0:
- # This is a proxied connection that has been mutated by
- # httplib._tunnel() and cannot be reused (since it would
- # attempt to bypass the proxy)
- conn = None
-
- return conn or self._new_conn()
-
- def _put_conn(self, conn):
- """
- Put a connection back into the pool.
-
- :param conn:
- Connection object for the current host and port as returned by
- :meth:`._new_conn` or :meth:`._get_conn`.
-
- If the pool is already full, the connection is closed and discarded
- because we exceeded maxsize. If connections are discarded frequently,
- then maxsize should be increased.
-
- If the pool is closed, then the connection will be closed and discarded.
- """
- try:
- self.pool.put(conn, block=False)
- return # Everything is dandy, done.
- except AttributeError:
- # self.pool is None.
- pass
- except queue.Full:
- # This should never happen if self.block == True
- log.warning(
- "Connection pool is full, discarding connection: %s",
- self.host)
-
- # Connection never got put back into the pool, close it.
- if conn:
- conn.close()
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- pass
-
- def _prepare_proxy(self, conn):
- # Nothing to do for HTTP connections.
- pass
-
- def _get_timeout(self, timeout):
- """ Helper that always returns a :class:`urllib3.util.Timeout` """
- if timeout is _Default:
- return self.timeout.clone()
-
- if isinstance(timeout, Timeout):
- return timeout.clone()
- else:
- # User passed us an int/float. This is for backwards compatibility,
- # can be removed later
- return Timeout.from_float(timeout)
-
- def _raise_timeout(self, err, url, timeout_value):
- """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
-
- if isinstance(err, SocketTimeout):
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- # See the above comment about EAGAIN in Python 3. In Python 2 we have
- # to specifically catch it and throw the timeout error
- if hasattr(err, 'errno') and err.errno in _blocking_errnos:
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- # Catch possible read timeouts thrown as SSL errors. If not the
- # case, rethrow the original. We need to do this because of:
- # http://bugs.python.org/issue10272
- if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
- **httplib_request_kw):
- """
- Perform a request on a given urllib connection object taken from our
- pool.
-
- :param conn:
- a connection from one of our connection pools
-
- :param timeout:
- Socket timeout in seconds for the request. This can be a
- float or integer, which will set the same timeout value for
- the socket connect and the socket read, or an instance of
- :class:`urllib3.util.Timeout`, which gives you more fine-grained
- control over your timeouts.
- """
- self.num_requests += 1
-
- timeout_obj = self._get_timeout(timeout)
- timeout_obj.start_connect()
- conn.timeout = timeout_obj.connect_timeout
-
- # Trigger any extra validation we need to do.
- try:
- self._validate_conn(conn)
- except (SocketTimeout, BaseSSLError) as e:
- # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
- self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
- raise
-
- # conn.request() calls httplib.*.request, not the method in
- # urllib3.request. It also calls makefile (recv) on the socket.
- if chunked:
- conn.request_chunked(method, url, **httplib_request_kw)
- else:
- conn.request(method, url, **httplib_request_kw)
-
- # Reset the timeout for the recv() on the socket
- read_timeout = timeout_obj.read_timeout
-
- # App Engine doesn't have a sock attr
- if getattr(conn, 'sock', None):
- # In Python 3 socket.py will catch EAGAIN and return None when you
- # try and read into the file pointer created by http.client, which
- # instead raises a BadStatusLine exception. Instead of catching
- # the exception and assuming all BadStatusLine exceptions are read
- # timeouts, check for a zero timeout before making the request.
- if read_timeout == 0:
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % read_timeout)
- if read_timeout is Timeout.DEFAULT_TIMEOUT:
- conn.sock.settimeout(socket.getdefaulttimeout())
- else: # None or a value
- conn.sock.settimeout(read_timeout)
-
- # Receive the response from the server
- try:
- try: # Python 2.7, use buffering of HTTP responses
- httplib_response = conn.getresponse(buffering=True)
- except TypeError: # Python 2.6 and older, Python 3
- try:
- httplib_response = conn.getresponse()
- except Exception as e:
- # Remove the TypeError from the exception chain in Python 3;
- # otherwise it looks like a programming error was the cause.
- six.raise_from(e, None)
- except (SocketTimeout, BaseSSLError, SocketError) as e:
- self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
- raise
-
- # AppEngine doesn't have a version attr.
- http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
- log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
- method, url, http_version, httplib_response.status,
- httplib_response.length)
-
- try:
- assert_header_parsing(httplib_response.msg)
- except HeaderParsingError as hpe: # Platform-specific: Python 3
- log.warning(
- 'Failed to parse headers (url=%s): %s',
- self._absolute_url(url), hpe, exc_info=True)
-
- return httplib_response
-
- def _absolute_url(self, path):
- return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- # Disable access to the pool
- old_pool, self.pool = self.pool, None
-
- try:
- while True:
- conn = old_pool.get(block=False)
- if conn:
- conn.close()
-
- except queue.Empty:
- pass # Done.
-
- def is_same_host(self, url):
- """
- Check if the given ``url`` is a member of the same host as this
- connection pool.
- """
- if url.startswith('/'):
- return True
-
- # TODO: Add optional support for socket.gethostbyname checking.
- scheme, host, port = get_host(url)
-
- host = _ipv6_host(host).lower()
-
- # Use explicit default port for comparison when none is given
- if self.port and not port:
- port = port_by_scheme.get(scheme)
- elif not self.port and port == port_by_scheme.get(scheme):
- port = None
-
- return (scheme, host, port) == (self.scheme, self.host, self.port)
-
- def urlopen(self, method, url, body=None, headers=None, retries=None,
- redirect=True, assert_same_host=True, timeout=_Default,
- pool_timeout=None, release_conn=None, chunked=False,
- body_pos=None, **response_kw):
- """
- Get a connection from the pool and perform an HTTP request. This is the
- lowest level call for making a request, so you'll need to specify all
- the raw details.
-
- .. note::
-
- More commonly, it's appropriate to use a convenience method provided
- by :class:`.RequestMethods`, such as :meth:`request`.
-
- .. note::
-
- `release_conn` will only behave as expected if
- `preload_content=False` because we want to make
- `preload_content=False` the default behaviour someday soon without
- breaking backwards compatibility.
-
- :param method:
- HTTP request method (such as GET, POST, PUT, etc.)
-
- :param body:
- Data to send in the request body (useful for creating
- POST requests, see HTTPConnectionPool.post_url for
- more convenience).
-
- :param headers:
- Dictionary of custom headers to send, such as User-Agent,
- If-None-Match, etc. If None, pool headers are used. If provided,
- these headers completely replace any pool-specific headers.
-
- :param retries:
- Configure the number of retries to allow before raising a
- :class:`~urllib3.exceptions.MaxRetryError` exception.
-
- Pass ``None`` to retry until you receive a response. Pass a
- :class:`~urllib3.util.retry.Retry` object for fine-grained control
- over different types of retries.
- Pass an integer number to retry connection errors that many times,
- but no other types of errors. Pass zero to never retry.
-
- If ``False``, then retries are disabled and any exception is raised
- immediately. Also, instead of raising a MaxRetryError on redirects,
- the redirect response will be returned.
-
- :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
-
- :param redirect:
- If True, automatically handle redirects (status codes 301, 302,
- 303, 307, 308). Each redirect counts as a retry. Disabling retries
- will disable redirect, too.
-
- :param assert_same_host:
- If ``True``, will make sure that the host of the pool requests is
- consistent else will raise HostChangedError. When False, you can
- use the pool on an HTTP proxy and request foreign hosts.
-
- :param timeout:
- If specified, overrides the default timeout for this one
- request. It may be a float (in seconds) or an instance of
- :class:`urllib3.util.Timeout`.
-
- :param pool_timeout:
- If set and the pool is set to block=True, then this method will
- block for ``pool_timeout`` seconds and raise EmptyPoolError if no
- connection is available within the time period.
-
- :param release_conn:
- If False, then the urlopen call will not release the connection
- back into the pool once a response is received (but will release if
- you read the entire contents of the response such as when
- `preload_content=True`). This is useful if you're not preloading
- the response's content immediately. You will need to call
- ``r.release_conn()`` on the response ``r`` to return the connection
- back into the pool. If None, it takes the value of
- ``response_kw.get('preload_content', True)``.
-
- :param chunked:
- If True, urllib3 will send the body using chunked transfer
- encoding. Otherwise, urllib3 will send the body using the standard
- content-length form. Defaults to False.
-
- :param int body_pos:
- Position to seek to in file-like body in the event of a retry or
- redirect. Typically this won't need to be set because urllib3 will
- auto-populate the value when needed.
-
- :param \\**response_kw:
- Additional parameters are passed to
- :meth:`urllib3.response.HTTPResponse.from_httplib`
- """
- if headers is None:
- headers = self.headers
-
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
- if release_conn is None:
- release_conn = response_kw.get('preload_content', True)
-
- # Check host
- if assert_same_host and not self.is_same_host(url):
- raise HostChangedError(self, url, retries)
-
- conn = None
-
- # Track whether `conn` needs to be released before
- # returning/raising/recursing. Update this variable if necessary, and
- # leave `release_conn` constant throughout the function. That way, if
- # the function recurses, the original value of `release_conn` will be
- # passed down into the recursive call, and its value will be respected.
- #
- # See issue #651 [1] for details.
- #
- # [1] <https://github.com/shazow/urllib3/issues/651>
- release_this_conn = release_conn
-
- # Merge the proxy headers. Only do this in HTTP. We have to copy the
- # headers dict so we can safely change it without those changes being
- # reflected in anyone else's copy.
- if self.scheme == 'http':
- headers = headers.copy()
- headers.update(self.proxy_headers)
-
- # Must keep the exception bound to a separate variable or else Python 3
- # complains about UnboundLocalError.
- err = None
-
- # Keep track of whether we cleanly exited the except block. This
- # ensures we do proper cleanup in finally.
- clean_exit = False
-
- # Rewind body position, if needed. Record current position
- # for future rewinds in the event of a redirect/retry.
- body_pos = set_file_position(body, body_pos)
-
- try:
- # Request a connection from the queue.
- timeout_obj = self._get_timeout(timeout)
- conn = self._get_conn(timeout=pool_timeout)
-
- conn.timeout = timeout_obj.connect_timeout
-
- is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
- if is_new_proxy_conn:
- self._prepare_proxy(conn)
-
- # Make the request on the httplib connection object.
- httplib_response = self._make_request(conn, method, url,
- timeout=timeout_obj,
- body=body, headers=headers,
- chunked=chunked)
-
- # If we're going to release the connection in ``finally:``, then
- # the response doesn't need to know about the connection. Otherwise
- # it will also try to release it and we'll have a double-release
- # mess.
- response_conn = conn if not release_conn else None
-
- # Pass method to Response for length checking
- response_kw['request_method'] = method
-
- # Import httplib's response into our own wrapper object
- response = self.ResponseCls.from_httplib(httplib_response,
- pool=self,
- connection=response_conn,
- retries=retries,
- **response_kw)
-
- # Everything went great!
- clean_exit = True
-
- except queue.Empty:
- # Timed out by queue.
- raise EmptyPoolError(self, "No pool connections are available.")
-
- except (BaseSSLError, CertificateError) as e:
- # Close the connection. If a connection is reused on which there
- # was a Certificate error, the next request will certainly raise
- # another Certificate error.
- clean_exit = False
- raise SSLError(e)
-
- except SSLError:
- # Treat SSLError separately from BaseSSLError to preserve
- # traceback.
- clean_exit = False
- raise
-
- except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
-            # Discard the connection for these exceptions. It will be
-            # replaced during the next _get_conn() call.
- clean_exit = False
-
- if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
- e = ProxyError('Cannot connect to proxy.', e)
- elif isinstance(e, (SocketError, HTTPException)):
- e = ProtocolError('Connection aborted.', e)
-
- retries = retries.increment(method, url, error=e, _pool=self,
- _stacktrace=sys.exc_info()[2])
- retries.sleep()
-
- # Keep track of the error for the retry warning.
- err = e
-
- finally:
- if not clean_exit:
- # We hit some kind of exception, handled or otherwise. We need
- # to throw the connection away unless explicitly told not to.
- # Close the connection, set the variable to None, and make sure
- # we put the None back in the pool to avoid leaking it.
- conn = conn and conn.close()
- release_this_conn = True
-
- if release_this_conn:
- # Put the connection back to be reused. If the connection is
- # expired then it will be None, which will get replaced with a
- # fresh connection during _get_conn.
- self._put_conn(conn)
-
- if not conn:
- # Try again
- log.warning("Retrying (%r) after connection "
- "broken by '%r': %s", retries, err, url)
- return self.urlopen(method, url, body, headers, retries,
- redirect, assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and response.get_redirect_location()
- if redirect_location:
- if response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
-
- retries.sleep_for_retry(response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(
- method, redirect_location, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(response.getheader('Retry-After'))
- if retries.is_retry(method, response.status, has_retry_after):
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_status:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
- retries.sleep(response)
- log.debug("Retry: %s", url)
- return self.urlopen(
- method, url, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn,
- body_pos=body_pos, **response_kw)
-
- return response
-
-
-class HTTPSConnectionPool(HTTPConnectionPool):
- """
- Same as :class:`.HTTPConnectionPool`, but HTTPS.
-
- When Python is compiled with the :mod:`ssl` module, then
- :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
- instead of :class:`.HTTPSConnection`.
-
- :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
- ``assert_hostname`` and ``host`` in this order to verify connections.
- If ``assert_hostname`` is False, no verification is done.
-
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
- ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
- available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
- the connection socket into an SSL socket.
- """
-
- scheme = 'https'
- ConnectionCls = HTTPSConnection
-
- def __init__(self, host, port=None,
- strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
- block=False, headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- key_file=None, cert_file=None, cert_reqs=None,
- ca_certs=None, ssl_version=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None, **conn_kw):
-
- HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
- block, headers, retries, _proxy, _proxy_headers,
- **conn_kw)
-
- if ca_certs and cert_reqs is None:
- cert_reqs = 'CERT_REQUIRED'
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.ca_certs = ca_certs
- self.ca_cert_dir = ca_cert_dir
- self.ssl_version = ssl_version
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
-
- def _prepare_conn(self, conn):
- """
- Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
- and establish the tunnel if proxy is used.
- """
-
- if isinstance(conn, VerifiedHTTPSConnection):
- conn.set_cert(key_file=self.key_file,
- cert_file=self.cert_file,
- cert_reqs=self.cert_reqs,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- assert_hostname=self.assert_hostname,
- assert_fingerprint=self.assert_fingerprint)
- conn.ssl_version = self.ssl_version
- return conn
-
- def _prepare_proxy(self, conn):
- """
- Establish tunnel connection early, because otherwise httplib
- would improperly set Host: header to proxy's IP:port.
- """
- # Python 2.7+
- try:
- set_tunnel = conn.set_tunnel
- except AttributeError: # Platform-specific: Python 2.6
- set_tunnel = conn._set_tunnel
-
- if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
- set_tunnel(self.host, self.port)
- else:
- set_tunnel(self.host, self.port, self.proxy_headers)
-
- conn.connect()
-
- def _new_conn(self):
- """
- Return a fresh :class:`httplib.HTTPSConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTPS connection (%d): %s",
- self.num_connections, self.host)
-
- if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
- raise SSLError("Can't connect to HTTPS URL because the SSL "
- "module is not available.")
-
- actual_host = self.host
- actual_port = self.port
- if self.proxy is not None:
- actual_host = self.proxy.host
- actual_port = self.proxy.port
-
- conn = self.ConnectionCls(host=actual_host, port=actual_port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
-
- return self._prepare_conn(conn)
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- super(HTTPSConnectionPool, self)._validate_conn(conn)
-
- # Force connect early to allow us to validate the connection.
- if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
- conn.connect()
-
- if not conn.is_verified:
- warnings.warn((
- 'Unverified HTTPS request is being made. '
- 'Adding certificate verification is strongly advised. See: '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings'),
- InsecureRequestWarning)
-
-
-def connection_from_url(url, **kw):
- """
- Given a url, return an :class:`.ConnectionPool` instance of its host.
-
- This is a shortcut for not having to parse out the scheme, host, and port
- of the url before creating an :class:`.ConnectionPool` instance.
-
- :param url:
- Absolute URL string that must include the scheme. Port is optional.
-
- :param \\**kw:
- Passes additional parameters to the constructor of the appropriate
- :class:`.ConnectionPool`. Useful for specifying things like
- timeout, maxsize, headers, etc.
-
- Example::
-
- >>> conn = connection_from_url('http://google.com/')
- >>> r = conn.request('GET', '/')
- """
- scheme, host, port = get_host(url)
- port = port or port_by_scheme.get(scheme, 80)
- if scheme == 'https':
- return HTTPSConnectionPool(host, port=port, **kw)
- else:
- return HTTPConnectionPool(host, port=port, **kw)
-
-
-def _ipv6_host(host):
- """
- Process IPv6 address literals
- """
-
- # httplib doesn't like it when we include brackets in IPv6 addresses
- # Specifically, if we include brackets but also pass the port then
- # httplib crazily doubles up the square brackets on the Host header.
- # Instead, we need to make sure we never pass ``None`` as the port.
- # However, for backward compatibility reasons we can't actually
- # *assert* that. See http://bugs.python.org/issue28539
- #
- # Also if an IPv6 address literal has a zone identifier, the
- # percent sign might be URIencoded, convert it back into ASCII
- if host.startswith('[') and host.endswith(']'):
- host = host.replace('%25', '%').strip('[]')
- return host
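For reference, the ``connection_from_url`` helper removed here is used exactly as its docstring example shows; a short sketch, assuming the standalone urllib3 package and example.com as a placeholder host::

    from urllib3.connectionpool import connection_from_url

    # Build a single-host pool; maxsize and timeout are forwarded to the
    # HTTP(S)ConnectionPool constructor.
    pool = connection_from_url('http://example.com/', maxsize=2, timeout=3.0)
    response = pool.request('GET', '/')
    print(response.status, len(response.data))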
diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/python.d/python_modules/urllib3/contrib/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/urllib3/contrib/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
deleted file mode 100644
index e26b84086..000000000
--- a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ /dev/null
@@ -1,590 +0,0 @@
-"""
-This module uses ctypes to bind a whole bunch of functions and constants from
-SecureTransport. The goal here is to provide the low-level API to
-SecureTransport. These are essentially the C-level functions and constants, and
-they're pretty gross to work with.
-
-This code is a bastardised version of the code found in Will Bond's oscrypto
-library. An enormous debt is owed to him for blazing this trail for us. For
-that reason, this code should be considered to be covered both by urllib3's
-license and by oscrypto's:
-
- Copyright (c) 2015-2016 Will Bond <will@wbond.net>
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
- to deal in the Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute, sublicense,
- and/or sell copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- DEALINGS IN THE SOFTWARE.
-"""
-from __future__ import absolute_import
-
-import platform
-from ctypes.util import find_library
-from ctypes import (
- c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
- c_bool
-)
-from ctypes import CDLL, POINTER, CFUNCTYPE
-
-
-security_path = find_library('Security')
-if not security_path:
- raise ImportError('The library Security could not be found')
-
-
-core_foundation_path = find_library('CoreFoundation')
-if not core_foundation_path:
- raise ImportError('The library CoreFoundation could not be found')
-
-
-version = platform.mac_ver()[0]
-version_info = tuple(map(int, version.split('.')))
-if version_info < (10, 8):
- raise OSError(
- 'Only OS X 10.8 and newer are supported, not %s.%s' % (
- version_info[0], version_info[1]
- )
- )
-
-Security = CDLL(security_path, use_errno=True)
-CoreFoundation = CDLL(core_foundation_path, use_errno=True)
-
-Boolean = c_bool
-CFIndex = c_long
-CFStringEncoding = c_uint32
-CFData = c_void_p
-CFString = c_void_p
-CFArray = c_void_p
-CFMutableArray = c_void_p
-CFDictionary = c_void_p
-CFError = c_void_p
-CFType = c_void_p
-CFTypeID = c_ulong
-
-CFTypeRef = POINTER(CFType)
-CFAllocatorRef = c_void_p
-
-OSStatus = c_int32
-
-CFDataRef = POINTER(CFData)
-CFStringRef = POINTER(CFString)
-CFArrayRef = POINTER(CFArray)
-CFMutableArrayRef = POINTER(CFMutableArray)
-CFDictionaryRef = POINTER(CFDictionary)
-CFArrayCallBacks = c_void_p
-CFDictionaryKeyCallBacks = c_void_p
-CFDictionaryValueCallBacks = c_void_p
-
-SecCertificateRef = POINTER(c_void_p)
-SecExternalFormat = c_uint32
-SecExternalItemType = c_uint32
-SecIdentityRef = POINTER(c_void_p)
-SecItemImportExportFlags = c_uint32
-SecItemImportExportKeyParameters = c_void_p
-SecKeychainRef = POINTER(c_void_p)
-SSLProtocol = c_uint32
-SSLCipherSuite = c_uint32
-SSLContextRef = POINTER(c_void_p)
-SecTrustRef = POINTER(c_void_p)
-SSLConnectionRef = c_uint32
-SecTrustResultType = c_uint32
-SecTrustOptionFlags = c_uint32
-SSLProtocolSide = c_uint32
-SSLConnectionType = c_uint32
-SSLSessionOption = c_uint32
-
-
-try:
- Security.SecItemImport.argtypes = [
- CFDataRef,
- CFStringRef,
- POINTER(SecExternalFormat),
- POINTER(SecExternalItemType),
- SecItemImportExportFlags,
- POINTER(SecItemImportExportKeyParameters),
- SecKeychainRef,
- POINTER(CFArrayRef),
- ]
- Security.SecItemImport.restype = OSStatus
-
- Security.SecCertificateGetTypeID.argtypes = []
- Security.SecCertificateGetTypeID.restype = CFTypeID
-
- Security.SecIdentityGetTypeID.argtypes = []
- Security.SecIdentityGetTypeID.restype = CFTypeID
-
- Security.SecKeyGetTypeID.argtypes = []
- Security.SecKeyGetTypeID.restype = CFTypeID
-
- Security.SecCertificateCreateWithData.argtypes = [
- CFAllocatorRef,
- CFDataRef
- ]
- Security.SecCertificateCreateWithData.restype = SecCertificateRef
-
- Security.SecCertificateCopyData.argtypes = [
- SecCertificateRef
- ]
- Security.SecCertificateCopyData.restype = CFDataRef
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SecIdentityCreateWithCertificate.argtypes = [
- CFTypeRef,
- SecCertificateRef,
- POINTER(SecIdentityRef)
- ]
- Security.SecIdentityCreateWithCertificate.restype = OSStatus
-
- Security.SecKeychainCreate.argtypes = [
- c_char_p,
- c_uint32,
- c_void_p,
- Boolean,
- c_void_p,
- POINTER(SecKeychainRef)
- ]
- Security.SecKeychainCreate.restype = OSStatus
-
- Security.SecKeychainDelete.argtypes = [
- SecKeychainRef
- ]
- Security.SecKeychainDelete.restype = OSStatus
-
- Security.SecPKCS12Import.argtypes = [
- CFDataRef,
- CFDictionaryRef,
- POINTER(CFArrayRef)
- ]
- Security.SecPKCS12Import.restype = OSStatus
-
- SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
- SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
-
- Security.SSLSetIOFuncs.argtypes = [
- SSLContextRef,
- SSLReadFunc,
- SSLWriteFunc
- ]
- Security.SSLSetIOFuncs.restype = OSStatus
-
- Security.SSLSetPeerID.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerID.restype = OSStatus
-
- Security.SSLSetCertificate.argtypes = [
- SSLContextRef,
- CFArrayRef
- ]
- Security.SSLSetCertificate.restype = OSStatus
-
- Security.SSLSetCertificateAuthorities.argtypes = [
- SSLContextRef,
- CFTypeRef,
- Boolean
- ]
- Security.SSLSetCertificateAuthorities.restype = OSStatus
-
- Security.SSLSetConnection.argtypes = [
- SSLContextRef,
- SSLConnectionRef
- ]
- Security.SSLSetConnection.restype = OSStatus
-
- Security.SSLSetPeerDomainName.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerDomainName.restype = OSStatus
-
- Security.SSLHandshake.argtypes = [
- SSLContextRef
- ]
- Security.SSLHandshake.restype = OSStatus
-
- Security.SSLRead.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLRead.restype = OSStatus
-
- Security.SSLWrite.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLWrite.restype = OSStatus
-
- Security.SSLClose.argtypes = [
- SSLContextRef
- ]
- Security.SSLClose.restype = OSStatus
-
- Security.SSLGetNumberSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberSupportedCiphers.restype = OSStatus
-
- Security.SSLGetSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetSupportedCiphers.restype = OSStatus
-
- Security.SSLSetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- c_size_t
- ]
- Security.SSLSetEnabledCiphers.restype = OSStatus
-
-    Security.SSLGetNumberEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberEnabledCiphers.restype = OSStatus
-
- Security.SSLGetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetEnabledCiphers.restype = OSStatus
-
- Security.SSLGetNegotiatedCipher.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite)
- ]
- Security.SSLGetNegotiatedCipher.restype = OSStatus
-
- Security.SSLGetNegotiatedProtocolVersion.argtypes = [
- SSLContextRef,
- POINTER(SSLProtocol)
- ]
- Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
-
- Security.SSLCopyPeerTrust.argtypes = [
- SSLContextRef,
- POINTER(SecTrustRef)
- ]
- Security.SSLCopyPeerTrust.restype = OSStatus
-
- Security.SecTrustSetAnchorCertificates.argtypes = [
- SecTrustRef,
- CFArrayRef
- ]
- Security.SecTrustSetAnchorCertificates.restype = OSStatus
-
-    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
- SecTrustRef,
- Boolean
- ]
- Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
-
- Security.SecTrustEvaluate.argtypes = [
- SecTrustRef,
- POINTER(SecTrustResultType)
- ]
- Security.SecTrustEvaluate.restype = OSStatus
-
- Security.SecTrustGetCertificateCount.argtypes = [
- SecTrustRef
- ]
- Security.SecTrustGetCertificateCount.restype = CFIndex
-
- Security.SecTrustGetCertificateAtIndex.argtypes = [
- SecTrustRef,
- CFIndex
- ]
- Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
-
- Security.SSLCreateContext.argtypes = [
- CFAllocatorRef,
- SSLProtocolSide,
- SSLConnectionType
- ]
- Security.SSLCreateContext.restype = SSLContextRef
-
- Security.SSLSetSessionOption.argtypes = [
- SSLContextRef,
- SSLSessionOption,
- Boolean
- ]
- Security.SSLSetSessionOption.restype = OSStatus
-
- Security.SSLSetProtocolVersionMin.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMin.restype = OSStatus
-
- Security.SSLSetProtocolVersionMax.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMax.restype = OSStatus
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SSLReadFunc = SSLReadFunc
- Security.SSLWriteFunc = SSLWriteFunc
- Security.SSLContextRef = SSLContextRef
- Security.SSLProtocol = SSLProtocol
- Security.SSLCipherSuite = SSLCipherSuite
- Security.SecIdentityRef = SecIdentityRef
- Security.SecKeychainRef = SecKeychainRef
- Security.SecTrustRef = SecTrustRef
- Security.SecTrustResultType = SecTrustResultType
- Security.SecExternalFormat = SecExternalFormat
- Security.OSStatus = OSStatus
-
- Security.kSecImportExportPassphrase = CFStringRef.in_dll(
- Security, 'kSecImportExportPassphrase'
- )
- Security.kSecImportItemIdentity = CFStringRef.in_dll(
- Security, 'kSecImportItemIdentity'
- )
-
- # CoreFoundation time!
- CoreFoundation.CFRetain.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRetain.restype = CFTypeRef
-
- CoreFoundation.CFRelease.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRelease.restype = None
-
- CoreFoundation.CFGetTypeID.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFGetTypeID.restype = CFTypeID
-
- CoreFoundation.CFStringCreateWithCString.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFStringEncoding
- ]
- CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
-
- CoreFoundation.CFStringGetCStringPtr.argtypes = [
- CFStringRef,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
-
- CoreFoundation.CFStringGetCString.argtypes = [
- CFStringRef,
- c_char_p,
- CFIndex,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCString.restype = c_bool
-
- CoreFoundation.CFDataCreate.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFIndex
- ]
- CoreFoundation.CFDataCreate.restype = CFDataRef
-
- CoreFoundation.CFDataGetLength.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetLength.restype = CFIndex
-
- CoreFoundation.CFDataGetBytePtr.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetBytePtr.restype = c_void_p
-
- CoreFoundation.CFDictionaryCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- POINTER(CFTypeRef),
- CFIndex,
- CFDictionaryKeyCallBacks,
- CFDictionaryValueCallBacks
- ]
- CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
-
- CoreFoundation.CFDictionaryGetValue.argtypes = [
- CFDictionaryRef,
- CFTypeRef
- ]
- CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
-
- CoreFoundation.CFArrayCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- CFIndex,
- CFArrayCallBacks,
- ]
- CoreFoundation.CFArrayCreate.restype = CFArrayRef
-
- CoreFoundation.CFArrayCreateMutable.argtypes = [
- CFAllocatorRef,
- CFIndex,
- CFArrayCallBacks
- ]
- CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
-
- CoreFoundation.CFArrayAppendValue.argtypes = [
- CFMutableArrayRef,
- c_void_p
- ]
- CoreFoundation.CFArrayAppendValue.restype = None
-
- CoreFoundation.CFArrayGetCount.argtypes = [
- CFArrayRef
- ]
- CoreFoundation.CFArrayGetCount.restype = CFIndex
-
- CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
- CFArrayRef,
- CFIndex
- ]
- CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
-
- CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
- CoreFoundation, 'kCFAllocatorDefault'
- )
- CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
- CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
- )
- CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
- )
-
- CoreFoundation.CFTypeRef = CFTypeRef
- CoreFoundation.CFArrayRef = CFArrayRef
- CoreFoundation.CFStringRef = CFStringRef
- CoreFoundation.CFDictionaryRef = CFDictionaryRef
-
-except (AttributeError):
- raise ImportError('Error initializing ctypes')
-
-
-class CFConst(object):
- """
- A class object that acts as essentially a namespace for CoreFoundation
- constants.
- """
- kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
-
-
-class SecurityConst(object):
- """
- A class object that acts as essentially a namespace for Security constants.
- """
- kSSLSessionOptionBreakOnServerAuth = 0
-
- kSSLProtocol2 = 1
- kSSLProtocol3 = 2
- kTLSProtocol1 = 4
- kTLSProtocol11 = 7
- kTLSProtocol12 = 8
-
- kSSLClientSide = 1
- kSSLStreamType = 0
-
- kSecFormatPEMSequence = 10
-
- kSecTrustResultInvalid = 0
- kSecTrustResultProceed = 1
- # This gap is present on purpose: this was kSecTrustResultConfirm, which
- # is deprecated.
- kSecTrustResultDeny = 3
- kSecTrustResultUnspecified = 4
- kSecTrustResultRecoverableTrustFailure = 5
- kSecTrustResultFatalTrustFailure = 6
- kSecTrustResultOtherError = 7
-
- errSSLProtocol = -9800
- errSSLWouldBlock = -9803
- errSSLClosedGraceful = -9805
- errSSLClosedNoNotify = -9816
- errSSLClosedAbort = -9806
-
- errSSLXCertChainInvalid = -9807
- errSSLCrypto = -9809
- errSSLInternal = -9810
- errSSLCertExpired = -9814
- errSSLCertNotYetValid = -9815
- errSSLUnknownRootCert = -9812
- errSSLNoRootCert = -9813
- errSSLHostNameMismatch = -9843
- errSSLPeerHandshakeFail = -9824
- errSSLPeerUserCancelled = -9839
- errSSLWeakPeerEphemeralDHKey = -9850
- errSSLServerAuthCompleted = -9841
- errSSLRecordOverflow = -9847
-
- errSecVerifyFailed = -67808
- errSecNoTrustSettings = -25263
- errSecItemNotFound = -25300
- errSecInvalidTrustSettings = -25262
-
- # Cipher suites. We only pick the ones our default cipher string allows.
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
- TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
- TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
- TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
- TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
- TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
- TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
- TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
- TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
- TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
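These ctypes bindings are not intended to be imported by applications; the SecureTransport backend they support is normally enabled through the contrib hook. A sketch of that usage, assuming a macOS host and the standalone urllib3 package::

    import urllib3
    from urllib3.contrib import securetransport

    # Monkey-patch urllib3 so HTTPS connections are wrapped with
    # SecureTransport instead of the standard ssl module.
    securetransport.inject_into_urllib3()

    http = urllib3.PoolManager()
    response = http.request('GET', 'https://example.com/')
    print(response.status)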
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
deleted file mode 100644
index 5e3494bce..000000000
--- a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ /dev/null
@@ -1,343 +0,0 @@
-"""
-Low-level helpers for the SecureTransport bindings.
-
-These are Python functions that are not directly related to the high-level APIs
-but are necessary to get them to work. They include a whole bunch of low-level
-CoreFoundation messing about and memory management. The concerns in this module
-are almost entirely about trying to avoid memory leaks and providing
-appropriate and useful assistance to the higher-level code.
-"""
-import base64
-import ctypes
-import itertools
-import re
-import os
-import ssl
-import tempfile
-
-from .bindings import Security, CoreFoundation, CFConst
-
-
-# This regular expression is used to grab PEM data out of a PEM bundle.
-_PEM_CERTS_RE = re.compile(
- b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
-)
-
-
-def _cf_data_from_bytes(bytestring):
- """
- Given a bytestring, create a CFData object from it. This CFData object must
- be CFReleased by the caller.
- """
- return CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
- )
-
-
-def _cf_dictionary_from_tuples(tuples):
- """
- Given a list of Python tuples, create an associated CFDictionary.
- """
- dictionary_size = len(tuples)
-
- # We need to get the dictionary keys and values out in the same order.
- keys = (t[0] for t in tuples)
- values = (t[1] for t in tuples)
- cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
- cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
-
- return CoreFoundation.CFDictionaryCreate(
- CoreFoundation.kCFAllocatorDefault,
- cf_keys,
- cf_values,
- dictionary_size,
- CoreFoundation.kCFTypeDictionaryKeyCallBacks,
- CoreFoundation.kCFTypeDictionaryValueCallBacks,
- )
-
-
-def _cf_string_to_unicode(value):
- """
- Creates a Unicode string from a CFString object. Used entirely for error
- reporting.
-
- Yes, it annoys me quite a lot that this function is this complex.
- """
- value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
-
- string = CoreFoundation.CFStringGetCStringPtr(
- value_as_void_p,
- CFConst.kCFStringEncodingUTF8
- )
- if string is None:
- buffer = ctypes.create_string_buffer(1024)
- result = CoreFoundation.CFStringGetCString(
- value_as_void_p,
- buffer,
- 1024,
- CFConst.kCFStringEncodingUTF8
- )
- if not result:
- raise OSError('Error copying C string from CFStringRef')
- string = buffer.value
- if string is not None:
- string = string.decode('utf-8')
- return string
-
-
-def _assert_no_error(error, exception_class=None):
- """
- Checks the return code and throws an exception if there is an error to
- report
- """
- if error == 0:
- return
-
- cf_error_string = Security.SecCopyErrorMessageString(error, None)
- output = _cf_string_to_unicode(cf_error_string)
- CoreFoundation.CFRelease(cf_error_string)
-
- if output is None or output == u'':
- output = u'OSStatus %s' % error
-
- if exception_class is None:
- exception_class = ssl.SSLError
-
- raise exception_class(output)
-
-
-def _cert_array_from_pem(pem_bundle):
- """
- Given a bundle of certs in PEM format, turns them into a CFArray of certs
- that can be used to validate a cert chain.
- """
- der_certs = [
- base64.b64decode(match.group(1))
- for match in _PEM_CERTS_RE.finditer(pem_bundle)
- ]
- if not der_certs:
- raise ssl.SSLError("No root certificates specified")
-
- cert_array = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
- )
- if not cert_array:
- raise ssl.SSLError("Unable to allocate memory!")
-
- try:
- for der_bytes in der_certs:
- certdata = _cf_data_from_bytes(der_bytes)
- if not certdata:
- raise ssl.SSLError("Unable to allocate memory!")
- cert = Security.SecCertificateCreateWithData(
- CoreFoundation.kCFAllocatorDefault, certdata
- )
- CoreFoundation.CFRelease(certdata)
- if not cert:
- raise ssl.SSLError("Unable to build cert object!")
-
- CoreFoundation.CFArrayAppendValue(cert_array, cert)
- CoreFoundation.CFRelease(cert)
- except Exception:
- # We need to free the array before the exception bubbles further.
- # We only want to do that if an error occurs: otherwise, the caller
- # should free.
- CoreFoundation.CFRelease(cert_array)
-
- return cert_array
-
-
-def _is_cert(item):
- """
- Returns True if a given CFTypeRef is a certificate.
- """
- expected = Security.SecCertificateGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _is_identity(item):
- """
- Returns True if a given CFTypeRef is an identity.
- """
- expected = Security.SecIdentityGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _temporary_keychain():
- """
- This function creates a temporary Mac keychain that we can use to work with
- credentials. This keychain uses a one-time password and a temporary file to
- store the data. We expect to have one keychain per socket. The returned
- SecKeychainRef must be freed by the caller, including calling
- SecKeychainDelete.
-
- Returns a tuple of the SecKeychainRef and the path to the temporary
- directory that contains it.
- """
- # Unfortunately, SecKeychainCreate requires a path to a keychain. This
- # means we cannot use mkstemp to use a generic temporary file. Instead,
- # we're going to create a temporary directory and a filename to use there.
- # This filename will be 8 random bytes expanded into base64. We also need
- # some random bytes to password-protect the keychain we're creating, so we
- # ask for 40 random bytes.
- random_bytes = os.urandom(40)
- filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
- password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
- tempdirectory = tempfile.mkdtemp()
-
- keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
-
- # We now want to create the keychain itself.
- keychain = Security.SecKeychainRef()
- status = Security.SecKeychainCreate(
- keychain_path,
- len(password),
- password,
- False,
- None,
- ctypes.byref(keychain)
- )
- _assert_no_error(status)
-
- # Having created the keychain, we want to pass it off to the caller.
- return keychain, tempdirectory
-
-
-def _load_items_from_file(keychain, path):
- """
- Given a single file, loads all the trust objects from it into arrays and
- the keychain.
- Returns a tuple of lists: the first list is a list of identities, the
- second a list of certs.
- """
- certificates = []
- identities = []
- result_array = None
-
- with open(path, 'rb') as f:
- raw_filedata = f.read()
-
- try:
- filedata = CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault,
- raw_filedata,
- len(raw_filedata)
- )
- result_array = CoreFoundation.CFArrayRef()
- result = Security.SecItemImport(
- filedata, # cert data
- None, # Filename, leaving it out for now
- None, # What the type of the file is, we don't care
- None, # what's in the file, we don't care
- 0, # import flags
- None, # key params, can include passphrase in the future
- keychain, # The keychain to insert into
- ctypes.byref(result_array) # Results
- )
- _assert_no_error(result)
-
- # A CFArray is not very useful to us as an intermediary
- # representation, so we are going to extract the objects we want
- # and then free the array. We don't need to keep hold of keys: the
- # keychain already has them!
- result_count = CoreFoundation.CFArrayGetCount(result_array)
- for index in range(result_count):
- item = CoreFoundation.CFArrayGetValueAtIndex(
- result_array, index
- )
- item = ctypes.cast(item, CoreFoundation.CFTypeRef)
-
- if _is_cert(item):
- CoreFoundation.CFRetain(item)
- certificates.append(item)
- elif _is_identity(item):
- CoreFoundation.CFRetain(item)
- identities.append(item)
- finally:
- if result_array:
- CoreFoundation.CFRelease(result_array)
-
- CoreFoundation.CFRelease(filedata)
-
- return (identities, certificates)
-
-
-def _load_client_cert_chain(keychain, *paths):
- """
- Load certificates and maybe keys from a number of files. Has the end goal
- of returning a CFArray containing one SecIdentityRef, and then zero or more
- SecCertificateRef objects, suitable for use as a client certificate trust
- chain.
- """
- # Ok, the strategy.
- #
- # This relies on knowing that macOS will not give you a SecIdentityRef
- # unless you have imported a key into a keychain. This is a somewhat
- # artificial limitation of macOS (for example, it doesn't necessarily
- # affect iOS), but there is nothing inside Security.framework that lets you
- # get a SecIdentityRef without having a key in a keychain.
- #
- # So the policy here is we take all the files and iterate them in order.
- # Each one will use SecItemImport to have one or more objects loaded from
- # it. We will also point at a keychain that macOS can use to work with the
- # private key.
- #
- # Once we have all the objects, we'll check what we actually have. If we
- # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
- # we'll take the first certificate (which we assume to be our leaf) and
- # ask the keychain to give us a SecIdentityRef with that cert's associated
- # key.
- #
- # We'll then return a CFArray containing the trust chain: one
- # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
- # responsibility for freeing this CFArray will be with the caller. This
- # CFArray must remain alive for the entire connection, so in practice it
- # will be stored with a single SSLSocket, along with the reference to the
- # keychain.
- certificates = []
- identities = []
-
- # Filter out bad paths.
- paths = (path for path in paths if path)
-
- try:
- for file_path in paths:
- new_identities, new_certs = _load_items_from_file(
- keychain, file_path
- )
- identities.extend(new_identities)
- certificates.extend(new_certs)
-
- # Ok, we have everything. The question is: do we have an identity? If
- # not, we want to grab one from the first cert we have.
- if not identities:
- new_identity = Security.SecIdentityRef()
- status = Security.SecIdentityCreateWithCertificate(
- keychain,
- certificates[0],
- ctypes.byref(new_identity)
- )
- _assert_no_error(status)
- identities.append(new_identity)
-
- # We now want to release the original certificate, as we no longer
- # need it.
- CoreFoundation.CFRelease(certificates.pop(0))
-
- # We now need to build a new CFArray that holds the trust chain.
- trust_chain = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- for item in itertools.chain(identities, certificates):
- # ArrayAppendValue does a CFRetain on the item. That's fine,
- # because the finally block will release our other refs to them.
- CoreFoundation.CFArrayAppendValue(trust_chain, item)
-
- return trust_chain
- finally:
- for obj in itertools.chain(identities, certificates):
- CoreFoundation.CFRelease(obj)
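The helpers above are macOS/CoreFoundation specific, but the first step of _cert_array_from_pem (splitting a PEM bundle into DER-encoded certificates) is plain Python. A platform-independent sketch of just that step, assuming only the standard library; the bundle path in the usage comment is illustrative:

    import base64
    import re

    # Same pattern the removed module used to pull certificates out of a PEM
    # bundle; re.DOTALL lets the base64 body span multiple lines.
    PEM_CERTS_RE = re.compile(
        b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
    )

    def der_certs_from_pem(pem_bundle):
        """Return the DER-encoded certificates found in a PEM bundle (bytes)."""
        return [base64.b64decode(m.group(1)) for m in PEM_CERTS_RE.finditer(pem_bundle)]

    # Usage (path is an example only):
    # with open('/etc/ssl/cert.pem', 'rb') as f:
    #     print(len(der_certs_from_pem(f.read())), 'certificates found')
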
diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/python.d/python_modules/urllib3/contrib/appengine.py
deleted file mode 100644
index 814b0222d..000000000
--- a/python.d/python_modules/urllib3/contrib/appengine.py
+++ /dev/null
@@ -1,296 +0,0 @@
-"""
-This module provides a pool manager that uses Google App Engine's
-`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-Example usage::
-
- from urllib3 import PoolManager
- from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
-
- if is_appengine_sandbox():
- # AppEngineManager uses AppEngine's URLFetch API behind the scenes
- http = AppEngineManager()
- else:
- # PoolManager uses a socket-level API behind the scenes
- http = PoolManager()
-
- r = http.request('GET', 'https://google.com/')
-
-There are `limitations <https://cloud.google.com/appengine/docs/python/\
-urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
-the best choice for your application. There are three options for using
-urllib3 on Google App Engine:
-
-1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
- cost-effective in many circumstances as long as your usage is within the
- limitations.
-2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
- Sockets also have `limitations and restrictions
- <https://cloud.google.com/appengine/docs/python/sockets/\
- #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
- To use sockets, be sure to specify the following in your ``app.yaml``::
-
- env_variables:
- GAE_USE_SOCKETS_HTTPLIB : 'true'
-
-3. If you are using `App Engine Flexible
-<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
-:class:`PoolManager` without any configuration or special environment variables.
-"""
-
-from __future__ import absolute_import
-import logging
-import os
-import warnings
-from ..packages.six.moves.urllib.parse import urljoin
-
-from ..exceptions import (
- HTTPError,
- HTTPWarning,
- MaxRetryError,
- ProtocolError,
- TimeoutError,
- SSLError
-)
-
-from ..packages.six import BytesIO
-from ..request import RequestMethods
-from ..response import HTTPResponse
-from ..util.timeout import Timeout
-from ..util.retry import Retry
-
-try:
- from google.appengine.api import urlfetch
-except ImportError:
- urlfetch = None
-
-
-log = logging.getLogger(__name__)
-
-
-class AppEnginePlatformWarning(HTTPWarning):
- pass
-
-
-class AppEnginePlatformError(HTTPError):
- pass
-
-
-class AppEngineManager(RequestMethods):
- """
- Connection manager for Google App Engine sandbox applications.
-
- This manager uses the URLFetch service directly instead of using the
- emulated httplib, and is subject to URLFetch limitations as described in
- the App Engine documentation `here
- <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
- Notably it will raise an :class:`AppEnginePlatformError` if:
- * URLFetch is not available.
- * If you attempt to use this on App Engine Flexible, as full socket
- support is available.
- * If a request size is more than 10 megabytes.
-    * If a response size is more than 32 megabytes.
- * If you use an unsupported request method such as OPTIONS.
-
- Beyond those cases, it will raise normal urllib3 errors.
- """
-
- def __init__(self, headers=None, retries=None, validate_certificate=True,
- urlfetch_retries=True):
- if not urlfetch:
- raise AppEnginePlatformError(
- "URLFetch is not available in this environment.")
-
- if is_prod_appengine_mvms():
- raise AppEnginePlatformError(
- "Use normal urllib3.PoolManager instead of AppEngineManager"
- "on Managed VMs, as using URLFetch is not necessary in "
- "this environment.")
-
- warnings.warn(
- "urllib3 is using URLFetch on Google App Engine sandbox instead "
- "of sockets. To use sockets directly instead of URLFetch see "
- "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
- AppEnginePlatformWarning)
-
- RequestMethods.__init__(self, headers)
- self.validate_certificate = validate_certificate
- self.urlfetch_retries = urlfetch_retries
-
- self.retries = retries or Retry.DEFAULT
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- # Return False to re-raise any potential exceptions
- return False
-
- def urlopen(self, method, url, body=None, headers=None,
- retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
- **response_kw):
-
- retries = self._get_retries(retries, redirect)
-
- try:
- follow_redirects = (
- redirect and
- retries.redirect != 0 and
- retries.total)
- response = urlfetch.fetch(
- url,
- payload=body,
- method=method,
- headers=headers or {},
- allow_truncated=False,
- follow_redirects=self.urlfetch_retries and follow_redirects,
- deadline=self._get_absolute_timeout(timeout),
- validate_certificate=self.validate_certificate,
- )
- except urlfetch.DeadlineExceededError as e:
- raise TimeoutError(self, e)
-
- except urlfetch.InvalidURLError as e:
- if 'too large' in str(e):
- raise AppEnginePlatformError(
- "URLFetch request too large, URLFetch only "
- "supports requests up to 10mb in size.", e)
- raise ProtocolError(e)
-
- except urlfetch.DownloadError as e:
- if 'Too many redirects' in str(e):
- raise MaxRetryError(self, url, reason=e)
- raise ProtocolError(e)
-
- except urlfetch.ResponseTooLargeError as e:
- raise AppEnginePlatformError(
- "URLFetch response too large, URLFetch only supports"
- "responses up to 32mb in size.", e)
-
- except urlfetch.SSLCertificateError as e:
- raise SSLError(e)
-
- except urlfetch.InvalidMethodError as e:
- raise AppEnginePlatformError(
- "URLFetch does not support method: %s" % method, e)
-
- http_response = self._urlfetch_response_to_http_response(
- response, retries=retries, **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and http_response.get_redirect_location()
- if redirect_location:
- # Check for redirect response
- if (self.urlfetch_retries and retries.raise_on_redirect):
- raise MaxRetryError(self, url, "too many redirects")
- else:
- if http_response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=http_response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise MaxRetryError(self, url, "too many redirects")
- return http_response
-
- retries.sleep_for_retry(http_response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- redirect_url = urljoin(url, redirect_location)
- return self.urlopen(
- method, redirect_url, body, headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(http_response.getheader('Retry-After'))
- if retries.is_retry(method, http_response.status, has_retry_after):
- retries = retries.increment(
- method, url, response=http_response, _pool=self)
- log.debug("Retry: %s", url)
- retries.sleep(http_response)
- return self.urlopen(
- method, url,
- body=body, headers=headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- return http_response
-
- def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
-
- if is_prod_appengine():
- # Production GAE handles deflate encoding automatically, but does
- # not remove the encoding header.
- content_encoding = urlfetch_resp.headers.get('content-encoding')
-
- if content_encoding == 'deflate':
- del urlfetch_resp.headers['content-encoding']
-
- transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
- # We have a full response's content,
- # so let's make sure we don't report ourselves as chunked data.
- if transfer_encoding == 'chunked':
- encodings = transfer_encoding.split(",")
- encodings.remove('chunked')
- urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
-
- return HTTPResponse(
- # In order for decoding to work, we must present the content as
- # a file-like object.
- body=BytesIO(urlfetch_resp.content),
- headers=urlfetch_resp.headers,
- status=urlfetch_resp.status_code,
- **response_kw
- )
-
- def _get_absolute_timeout(self, timeout):
- if timeout is Timeout.DEFAULT_TIMEOUT:
- return None # Defer to URLFetch's default.
- if isinstance(timeout, Timeout):
- if timeout._read is not None or timeout._connect is not None:
- warnings.warn(
- "URLFetch does not support granular timeout settings, "
- "reverting to total or default URLFetch timeout.",
- AppEnginePlatformWarning)
- return timeout.total
- return timeout
-
- def _get_retries(self, retries, redirect):
- if not isinstance(retries, Retry):
- retries = Retry.from_int(
- retries, redirect=redirect, default=self.retries)
-
- if retries.connect or retries.read or retries.redirect:
- warnings.warn(
- "URLFetch only supports total retries and does not "
- "recognize connect, read, or redirect retry parameters.",
- AppEnginePlatformWarning)
-
- return retries
-
-
-def is_appengine():
- return (is_local_appengine() or
- is_prod_appengine() or
- is_prod_appengine_mvms())
-
-
-def is_appengine_sandbox():
- return is_appengine() and not is_prod_appengine_mvms()
-
-
-def is_local_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Development/' in os.environ['SERVER_SOFTWARE'])
-
-
-def is_prod_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
- not is_prod_appengine_mvms())
-
-
-def is_prod_appengine_mvms():
- return os.environ.get('GAE_VM', False) == 'true'
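For context, a sketch of how this removed module was meant to be used, following its own docstring. It is only meaningful inside a first-generation App Engine standard sandbox where the google.appengine urlfetch API is importable; the target URL is a placeholder, and URLFetch honours only the total retry count:

    from urllib3 import PoolManager
    from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
    from urllib3.util.retry import Retry

    if is_appengine_sandbox():
        # URLFetch ignores connect/read/redirect retry settings, so keep Retry simple.
        http = AppEngineManager(retries=Retry(total=3))
    else:
        http = PoolManager()

    r = http.request('GET', 'https://www.example.com/')
    print(r.status, len(r.data))
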
diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/python.d/python_modules/urllib3/contrib/ntlmpool.py
deleted file mode 100644
index 642e99ed2..000000000
--- a/python.d/python_modules/urllib3/contrib/ntlmpool.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-NTLM authenticating pool, contributed by erikcederstran
-
-Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
-"""
-from __future__ import absolute_import
-
-from logging import getLogger
-from ntlm import ntlm
-
-from .. import HTTPSConnectionPool
-from ..packages.six.moves.http_client import HTTPSConnection
-
-
-log = getLogger(__name__)
-
-
-class NTLMConnectionPool(HTTPSConnectionPool):
- """
- Implements an NTLM authentication version of an urllib3 connection pool
- """
-
- scheme = 'https'
-
- def __init__(self, user, pw, authurl, *args, **kwargs):
- """
- authurl is a random URL on the server that is protected by NTLM.
- user is the Windows user, probably in the DOMAIN\\username format.
- pw is the password for the user.
- """
- super(NTLMConnectionPool, self).__init__(*args, **kwargs)
- self.authurl = authurl
- self.rawuser = user
- user_parts = user.split('\\', 1)
- self.domain = user_parts[0].upper()
- self.user = user_parts[1]
- self.pw = pw
-
- def _new_conn(self):
- # Performs the NTLM handshake that secures the connection. The socket
- # must be kept open while requests are performed.
- self.num_connections += 1
- log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
- self.num_connections, self.host, self.authurl)
-
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- req_header = 'Authorization'
- resp_header = 'www-authenticate'
-
- conn = HTTPSConnection(host=self.host, port=self.port)
-
- # Send negotiation message
- headers[req_header] = (
- 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- reshdr = dict(res.getheaders())
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', reshdr)
- log.debug('Response data: %s [...]', res.read(100))
-
- # Remove the reference to the socket, so that it can not be closed by
- # the response object (we want to keep the socket open)
- res.fp = None
-
- # Server should respond with a challenge message
- auth_header_values = reshdr[resp_header].split(', ')
- auth_header_value = None
- for s in auth_header_values:
- if s[:5] == 'NTLM ':
- auth_header_value = s[5:]
- if auth_header_value is None:
- raise Exception('Unexpected %s response header: %s' %
- (resp_header, reshdr[resp_header]))
-
- # Send authentication message
- ServerChallenge, NegotiateFlags = \
- ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
- auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
- self.user,
- self.domain,
- self.pw,
- NegotiateFlags)
- headers[req_header] = 'NTLM %s' % auth_msg
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', dict(res.getheaders()))
- log.debug('Response data: %s [...]', res.read()[:100])
- if res.status != 200:
- if res.status == 401:
- raise Exception('Server rejected request: wrong '
- 'username or password')
- raise Exception('Wrong server response: %s %s' %
- (res.status, res.reason))
-
- res.fp = None
- log.debug('Connection established')
- return conn
-
- def urlopen(self, method, url, body=None, headers=None, retries=3,
- redirect=True, assert_same_host=True):
- if headers is None:
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- return super(NTLMConnectionPool, self).urlopen(method, url, body,
- headers, retries,
- redirect,
- assert_same_host)
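A hedged usage sketch for the removed NTLM pool, assuming the third-party ntlm package is installed and an NTLM-protected host is reachable; the hostname, credentials, and path below are placeholders, not values from this repository:

    from urllib3.contrib.ntlmpool import NTLMConnectionPool

    pool = NTLMConnectionPool(
        user='EXAMPLE\\jdoe',            # DOMAIN\username, placeholder
        pw='s3cret',                     # placeholder password
        authurl='/protected/',           # any NTLM-protected URL on the host
        host='intranet.example.com',
        port=443,
    )
    resp = pool.urlopen('GET', '/protected/')
    print(resp.status)
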
diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/python.d/python_modules/urllib3/contrib/pyopenssl.py
deleted file mode 100644
index 6645dbaa9..000000000
--- a/python.d/python_modules/urllib3/contrib/pyopenssl.py
+++ /dev/null
@@ -1,457 +0,0 @@
-"""
-SSL with SNI_-support for Python 2. Follow these instructions if you would
-like to verify SSL certificates in Python 2. Note, the default libraries do
-*not* do certificate checking; you need to do additional work to validate
-certificates yourself.
-
-This needs the following packages installed:
-
-* pyOpenSSL (tested with 16.0.0)
-* cryptography (minimum 1.3.4, from pyopenssl)
-* idna (minimum 2.0, from cryptography)
-
-However, pyopenssl depends on cryptography, which depends on idna, so while we
-use all three directly here we end up having relatively few packages required.
-
-You can install them with the following command:
-
- pip install pyopenssl cryptography idna
-
-To activate certificate checking, call
-:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
-before you begin making HTTP requests. This can be done in a ``sitecustomize``
-module, or at any other time before your application begins using ``urllib3``,
-like this::
-
- try:
- import urllib3.contrib.pyopenssl
- urllib3.contrib.pyopenssl.inject_into_urllib3()
- except ImportError:
- pass
-
-Now you can use :mod:`urllib3` as you normally would, and it will support SNI
-when the required modules are installed.
-
-Activating this module also has the positive side effect of disabling SSL/TLS
-compression in Python 2 (see `CRIME attack`_).
-
-If you want to configure the default list of supported cipher suites, you can
-set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
-
-.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
-.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
-"""
-from __future__ import absolute_import
-
-import OpenSSL.SSL
-from cryptography import x509
-from cryptography.hazmat.backends.openssl import backend as openssl_backend
-from cryptography.hazmat.backends.openssl.x509 import _Certificate
-
-from socket import timeout, error as SocketError
-from io import BytesIO
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-import logging
-import ssl
-
-try:
- import six
-except ImportError:
- from ..packages import six
-
-import sys
-
-from .. import util
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works.
-HAS_SNI = True
-
-# Map from urllib3 to PyOpenSSL compatible parameter-values.
-_openssl_versions = {
- ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
- ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
-}
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
-
-try:
- _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
-except AttributeError:
- pass
-
-_stdlib_to_openssl_verify = {
- ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
- ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
- ssl.CERT_REQUIRED:
- OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
-}
-_openssl_to_stdlib_verify = dict(
- (v, k) for k, v in _stdlib_to_openssl_verify.items()
-)
-
-# OpenSSL will only write 16K at a time
-SSL_WRITE_BLOCKSIZE = 16384
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-
-log = logging.getLogger(__name__)
-
-
-def inject_into_urllib3():
- 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
-
- _validate_dependencies_met()
-
- util.ssl_.SSLContext = PyOpenSSLContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_PYOPENSSL = True
- util.ssl_.IS_PYOPENSSL = True
-
-
-def extract_from_urllib3():
- 'Undo monkey-patching by :func:`inject_into_urllib3`.'
-
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_PYOPENSSL = False
- util.ssl_.IS_PYOPENSSL = False
-
-
-def _validate_dependencies_met():
- """
- Verifies that PyOpenSSL's package-level dependencies have been met.
- Throws `ImportError` if they are not met.
- """
- # Method added in `cryptography==1.1`; not available in older versions
- from cryptography.x509.extensions import Extensions
- if getattr(Extensions, "get_extension_for_class", None) is None:
- raise ImportError("'cryptography' module missing required functionality. "
- "Try upgrading to v1.3.4 or newer.")
-
- # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
- # attribute is only present on those versions.
- from OpenSSL.crypto import X509
- x509 = X509()
- if getattr(x509, "_x509", None) is None:
- raise ImportError("'pyOpenSSL' module missing required functionality. "
- "Try upgrading to v0.14 or newer.")
-
-
-def _dnsname_to_stdlib(name):
- """
- Converts a dNSName SubjectAlternativeName field to the form used by the
- standard library on the given Python version.
-
- Cryptography produces a dNSName as a unicode string that was idna-decoded
- from ASCII bytes. We need to idna-encode that string to get it back, and
- then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
- uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
- """
- def idna_encode(name):
- """
- Borrowed wholesale from the Python Cryptography Project. It turns out
- that we can't just safely call `idna.encode`: it can explode for
- wildcard names. This avoids that problem.
- """
- import idna
-
- for prefix in [u'*.', u'.']:
- if name.startswith(prefix):
- name = name[len(prefix):]
- return prefix.encode('ascii') + idna.encode(name)
- return idna.encode(name)
-
- name = idna_encode(name)
- if sys.version_info >= (3, 0):
- name = name.decode('utf-8')
- return name
-
-
-def get_subj_alt_name(peer_cert):
- """
-    Given a PyOpenSSL certificate, provides all the subject alternative names.
- """
- # Pass the cert to cryptography, which has much better APIs for this.
- # This is technically using private APIs, but should work across all
- # relevant versions until PyOpenSSL gets something proper for this.
- cert = _Certificate(openssl_backend, peer_cert._x509)
-
- # We want to find the SAN extension. Ask Cryptography to locate it (it's
- # faster than looping in Python)
- try:
- ext = cert.extensions.get_extension_for_class(
- x509.SubjectAlternativeName
- ).value
- except x509.ExtensionNotFound:
- # No such extension, return the empty list.
- return []
- except (x509.DuplicateExtension, x509.UnsupportedExtension,
- x509.UnsupportedGeneralNameType, UnicodeError) as e:
- # A problem has been found with the quality of the certificate. Assume
- # no SAN field is present.
- log.warning(
- "A problem was encountered with the certificate that prevented "
- "urllib3 from finding the SubjectAlternativeName field. This can "
- "affect certificate validation. The error was %s",
- e,
- )
- return []
-
- # We want to return dNSName and iPAddress fields. We need to cast the IPs
- # back to strings because the match_hostname function wants them as
- # strings.
- # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
- # decoded. This is pretty frustrating, but that's what the standard library
- # does with certificates, and so we need to attempt to do the same.
- names = [
- ('DNS', _dnsname_to_stdlib(name))
- for name in ext.get_values_for_type(x509.DNSName)
- ]
- names.extend(
- ('IP Address', str(name))
- for name in ext.get_values_for_type(x509.IPAddress)
- )
-
- return names
-
-
-class WrappedSocket(object):
-    '''API-compatibility wrapper for PyOpenSSL's Connection class.
-
- Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
- collector of pypy.
- '''
-
- def __init__(self, connection, socket, suppress_ragged_eofs=True):
- self.connection = connection
- self.socket = socket
- self.suppress_ragged_eofs = suppress_ragged_eofs
- self._makefile_refs = 0
- self._closed = False
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, *args, **kwargs):
- try:
- data = self.connection.recv(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return b''
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return b''
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv(*args, **kwargs)
- else:
- return data
-
- def recv_into(self, *args, **kwargs):
- try:
- return self.connection.recv_into(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return 0
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return 0
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv_into(*args, **kwargs)
-
- def settimeout(self, timeout):
- return self.socket.settimeout(timeout)
-
- def _send_until_done(self, data):
- while True:
- try:
- return self.connection.send(data)
- except OpenSSL.SSL.WantWriteError:
- wr = util.wait_for_write(self.socket, self.socket.gettimeout())
- if not wr:
- raise timeout()
- continue
- except OpenSSL.SSL.SysCallError as e:
- raise SocketError(str(e))
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
-
- def shutdown(self):
- # FIXME rethrow compatible exceptions should we ever use this
- self.connection.shutdown()
-
- def close(self):
- if self._makefile_refs < 1:
- try:
- self._closed = True
- return self.connection.close()
- except OpenSSL.SSL.Error:
- return
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- x509 = self.connection.get_peer_certificate()
-
- if not x509:
- return x509
-
- if binary_form:
- return OpenSSL.crypto.dump_certificate(
- OpenSSL.crypto.FILETYPE_ASN1,
- x509)
-
- return {
- 'subject': (
- (('commonName', x509.get_subject().CN),),
- ),
- 'subjectAltName': get_subj_alt_name(x509)
- }
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- makefile = backport_makefile
-
-WrappedSocket.makefile = makefile
-
-
-class PyOpenSSLContext(object):
- """
- I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
- for translating the interface of the standard library ``SSLContext`` object
- to calls into PyOpenSSL.
- """
- def __init__(self, protocol):
- self.protocol = _openssl_versions[protocol]
- self._ctx = OpenSSL.SSL.Context(self.protocol)
- self._options = 0
- self.check_hostname = False
-
- @property
- def options(self):
- return self._options
-
- @options.setter
- def options(self, value):
- self._options = value
- self._ctx.set_options(value)
-
- @property
- def verify_mode(self):
- return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._ctx.set_verify(
- _stdlib_to_openssl_verify[value],
- _verify_callback
- )
-
- def set_default_verify_paths(self):
- self._ctx.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- if isinstance(ciphers, six.text_type):
- ciphers = ciphers.encode('utf-8')
- self._ctx.set_cipher_list(ciphers)
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- if cafile is not None:
- cafile = cafile.encode('utf-8')
- if capath is not None:
- capath = capath.encode('utf-8')
- self._ctx.load_verify_locations(cafile, capath)
- if cadata is not None:
- self._ctx.load_verify_locations(BytesIO(cadata))
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._ctx.use_certificate_file(certfile)
- if password is not None:
- self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
- self._ctx.use_privatekey_file(keyfile or certfile)
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- cnx = OpenSSL.SSL.Connection(self._ctx, sock)
-
- if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
- server_hostname = server_hostname.encode('utf-8')
-
- if server_hostname is not None:
- cnx.set_tlsext_host_name(server_hostname)
-
- cnx.set_connect_state()
-
- while True:
- try:
- cnx.do_handshake()
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(sock, sock.gettimeout())
- if not rd:
- raise timeout('select timed out')
- continue
- except OpenSSL.SSL.Error as e:
- raise ssl.SSLError('bad handshake: %r' % e)
- break
-
- return WrappedSocket(cnx, sock)
-
-
-def _verify_callback(cnx, x509, err_no, err_depth, return_code):
- return err_no == 0
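The module docstring above already shows the injection call; here is a slightly fuller sketch of the patch/unpatch cycle it describes, assuming pyOpenSSL, cryptography, and idna are installed and that the URL is a placeholder:

    import urllib3
    import urllib3.contrib.pyopenssl as pyopenssl

    pyopenssl.inject_into_urllib3()      # route urllib3's TLS through PyOpenSSL
    try:
        http = urllib3.PoolManager()
        r = http.request('GET', 'https://www.example.com/')
        print(r.status)
    finally:
        pyopenssl.extract_from_urllib3() # restore the stdlib ssl backend
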
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/python.d/python_modules/urllib3/contrib/securetransport.py
deleted file mode 100644
index 72b23ab1c..000000000
--- a/python.d/python_modules/urllib3/contrib/securetransport.py
+++ /dev/null
@@ -1,807 +0,0 @@
-"""
-SecureTransport support for urllib3 via ctypes.
-
-This makes platform-native TLS available to urllib3 users on macOS without the
-use of a compiler. This is an important feature because the Python Package
-Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
-that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
-this is to give macOS users an alternative solution to the problem, and that
-solution is to use SecureTransport.
-
-We use ctypes here because this solution must not require a compiler. That's
-because pip is not allowed to require a compiler either.
-
-This is not intended to be a seriously long-term solution to this problem.
-The hope is that PEP 543 will eventually solve this issue for us, at which
-point we can retire this contrib module. But in the short term, we need to
-solve the impending tire fire that is Python on Mac without this kind of
-contrib module. So...here we are.
-
-To use this module, simply import and inject it::
-
- import urllib3.contrib.securetransport
- urllib3.contrib.securetransport.inject_into_urllib3()
-
-Happy TLSing!
-"""
-from __future__ import absolute_import
-
-import contextlib
-import ctypes
-import errno
-import os.path
-import shutil
-import socket
-import ssl
-import threading
-import weakref
-
-from .. import util
-from ._securetransport.bindings import (
- Security, SecurityConst, CoreFoundation
-)
-from ._securetransport.low_level import (
- _assert_no_error, _cert_array_from_pem, _temporary_keychain,
- _load_client_cert_chain
-)
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-try:
- memoryview(b'')
-except NameError:
- raise ImportError("SecureTransport only works on Pythons with memoryview")
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works
-HAS_SNI = True
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-# This dictionary is used by the read callback to obtain a handle to the
-# calling wrapped socket. This is a pretty silly approach, but for now it'll
-# do. I feel like I should be able to smuggle a handle to the wrapped socket
-# directly in the SSLConnectionRef, but for now this approach will work I
-# guess.
-#
-# We need to lock around this structure for inserts, but we don't do it for
-# reads/writes in the callbacks. The reasoning here goes as follows:
-#
-# 1. It is not possible to call into the callbacks before the dictionary is
-# populated, so once in the callback the id must be in the dictionary.
-# 2. The callbacks don't mutate the dictionary, they only read from it, and
-# so cannot conflict with any of the insertions.
-#
-# This is good: if we had to lock in the callbacks we'd drastically slow down
-# the performance of this code.
-_connection_refs = weakref.WeakValueDictionary()
-_connection_ref_lock = threading.Lock()
-
-# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
-# for no better reason than we need *a* limit, and this one is right there.
-SSL_WRITE_BLOCKSIZE = 16384
-
-# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
-# individual cipher suites. We need to do this because this is how
-# SecureTransport wants them.
-CIPHER_SUITES = [
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
-]
-
-# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
-# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
-_protocol_to_min_max = {
- ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
-}
-
-if hasattr(ssl, "PROTOCOL_SSLv2"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
- SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
- )
-if hasattr(ssl, "PROTOCOL_SSLv3"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
- SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
- )
-if hasattr(ssl, "PROTOCOL_TLSv1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
- SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
- SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_2"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
- SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
- )
-if hasattr(ssl, "PROTOCOL_TLS"):
- _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
-
-
-def inject_into_urllib3():
- """
- Monkey-patch urllib3 with SecureTransport-backed SSL-support.
- """
- util.ssl_.SSLContext = SecureTransportContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_SECURETRANSPORT = True
- util.ssl_.IS_SECURETRANSPORT = True
-
-
-def extract_from_urllib3():
- """
- Undo monkey-patching by :func:`inject_into_urllib3`.
- """
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_SECURETRANSPORT = False
- util.ssl_.IS_SECURETRANSPORT = False
-
-
-def _read_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport read callback. This is called by ST to request that data
- be returned from the socket.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- requested_length = data_length_pointer[0]
-
- timeout = wrapped_socket.gettimeout()
- error = None
- read_count = 0
- buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
- buffer_view = memoryview(buffer)
-
- try:
- while read_count < requested_length:
- if timeout is None or timeout >= 0:
- readables = util.wait_for_read([base_socket], timeout)
- if not readables:
- raise socket.error(errno.EAGAIN, 'timed out')
-
- # We need to tell ctypes that we have a buffer that can be
- # written to. Upsettingly, we do that like this:
- chunk_size = base_socket.recv_into(
- buffer_view[read_count:requested_length]
- )
- read_count += chunk_size
- if not chunk_size:
- if not read_count:
- return SecurityConst.errSSLClosedGraceful
- break
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = read_count
-
- if read_count != requested_length:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-def _write_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport write callback. This is called by ST to request that data
- actually be sent on the network.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- bytes_to_write = data_length_pointer[0]
- data = ctypes.string_at(data_buffer, bytes_to_write)
-
- timeout = wrapped_socket.gettimeout()
- error = None
- sent = 0
-
- try:
- while sent < bytes_to_write:
- if timeout is None or timeout >= 0:
- writables = util.wait_for_write([base_socket], timeout)
- if not writables:
- raise socket.error(errno.EAGAIN, 'timed out')
- chunk_sent = base_socket.send(data)
- sent += chunk_sent
-
- # This has some needless copying here, but I'm not sure there's
- # much value in optimising this data path.
- data = data[chunk_sent:]
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = sent
- if sent != bytes_to_write:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-# We need to keep these two objects references alive: if they get GC'd while
-# in use then SecureTransport could attempt to call a function that is in freed
-# memory. That would be...uh...bad. Yeah, that's the word. Bad.
-_read_callback_pointer = Security.SSLReadFunc(_read_callback)
-_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
-
-
-class WrappedSocket(object):
- """
- API-compatibility wrapper for Python's OpenSSL wrapped socket object.
-
- Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
- collector of PyPy.
- """
- def __init__(self, socket):
- self.socket = socket
- self.context = None
- self._makefile_refs = 0
- self._closed = False
- self._exception = None
- self._keychain = None
- self._keychain_dir = None
- self._client_cert_chain = None
-
- # We save off the previously-configured timeout and then set it to
- # zero. This is done because we use select and friends to handle the
- # timeouts, but if we leave the timeout set on the lower socket then
- # Python will "kindly" call select on that socket again for us. Avoid
- # that by forcing the timeout to zero.
- self._timeout = self.socket.gettimeout()
- self.socket.settimeout(0)
-
- @contextlib.contextmanager
- def _raise_on_error(self):
- """
- A context manager that can be used to wrap calls that do I/O from
- SecureTransport. If any of the I/O callbacks hit an exception, this
- context manager will correctly propagate the exception after the fact.
- This avoids silently swallowing those exceptions.
-
- It also correctly forces the socket closed.
- """
- self._exception = None
-
- # We explicitly don't catch around this yield because in the unlikely
- # event that an exception was hit in the block we don't want to swallow
- # it.
- yield
- if self._exception is not None:
- exception, self._exception = self._exception, None
- self.close()
- raise exception
-
- def _set_ciphers(self):
- """
- Sets up the allowed ciphers. By default this matches the set in
- util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
-    by hand and can't be changed at this time, mostly because parsing
- OpenSSL cipher strings is going to be a freaking nightmare.
- """
- ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
- result = Security.SSLSetEnabledCiphers(
- self.context, ciphers, len(CIPHER_SUITES)
- )
- _assert_no_error(result)
-
- def _custom_validate(self, verify, trust_bundle):
- """
- Called when we have set custom validation. We do this in two cases:
- first, when cert validation is entirely disabled; and second, when
- using a custom trust DB.
- """
- # If we disabled cert validation, just say: cool.
- if not verify:
- return
-
- # We want data in memory, so load it up.
- if os.path.isfile(trust_bundle):
- with open(trust_bundle, 'rb') as f:
- trust_bundle = f.read()
-
- cert_array = None
- trust = Security.SecTrustRef()
-
- try:
- # Get a CFArray that contains the certs we want.
- cert_array = _cert_array_from_pem(trust_bundle)
-
- # Ok, now the hard part. We want to get the SecTrustRef that ST has
- # created for this connection, shove our CAs into it, tell ST to
- # ignore everything else it knows, and then ask if it can build a
- # chain. This is a buuuunch of code.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- raise ssl.SSLError("Failed to copy trust reference")
-
- result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
- _assert_no_error(result)
-
- result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
- _assert_no_error(result)
-
- trust_result = Security.SecTrustResultType()
- result = Security.SecTrustEvaluate(
- trust, ctypes.byref(trust_result)
- )
- _assert_no_error(result)
- finally:
- if trust:
- CoreFoundation.CFRelease(trust)
-
- if cert_array is None:
- CoreFoundation.CFRelease(cert_array)
-
- # Ok, now we can look at what the result was.
- successes = (
- SecurityConst.kSecTrustResultUnspecified,
- SecurityConst.kSecTrustResultProceed
- )
- if trust_result.value not in successes:
- raise ssl.SSLError(
- "certificate verify failed, error code: %d" %
- trust_result.value
- )
-
- def handshake(self,
- server_hostname,
- verify,
- trust_bundle,
- min_version,
- max_version,
- client_cert,
- client_key,
- client_key_passphrase):
- """
- Actually performs the TLS handshake. This is run automatically by
- wrapped socket, and shouldn't be needed in user code.
- """
- # First, we do the initial bits of connection setup. We need to create
- # a context, set its I/O funcs, and set the connection reference.
- self.context = Security.SSLCreateContext(
- None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
- )
- result = Security.SSLSetIOFuncs(
- self.context, _read_callback_pointer, _write_callback_pointer
- )
- _assert_no_error(result)
-
- # Here we need to compute the handle to use. We do this by taking the
- # id of self modulo 2**31 - 1. If this is already in the dictionary, we
- # just keep incrementing by one until we find a free space.
- with _connection_ref_lock:
- handle = id(self) % 2147483647
- while handle in _connection_refs:
- handle = (handle + 1) % 2147483647
- _connection_refs[handle] = self
-
- result = Security.SSLSetConnection(self.context, handle)
- _assert_no_error(result)
-
- # If we have a server hostname, we should set that too.
- if server_hostname:
- if not isinstance(server_hostname, bytes):
- server_hostname = server_hostname.encode('utf-8')
-
- result = Security.SSLSetPeerDomainName(
- self.context, server_hostname, len(server_hostname)
- )
- _assert_no_error(result)
-
- # Setup the ciphers.
- self._set_ciphers()
-
- # Set the minimum and maximum TLS versions.
- result = Security.SSLSetProtocolVersionMin(self.context, min_version)
- _assert_no_error(result)
- result = Security.SSLSetProtocolVersionMax(self.context, max_version)
- _assert_no_error(result)
-
- # If there's a trust DB, we need to use it. We do that by telling
- # SecureTransport to break on server auth. We also do that if we don't
- # want to validate the certs at all: we just won't actually do any
- # authing in that case.
- if not verify or trust_bundle is not None:
- result = Security.SSLSetSessionOption(
- self.context,
- SecurityConst.kSSLSessionOptionBreakOnServerAuth,
- True
- )
- _assert_no_error(result)
-
- # If there's a client cert, we need to use it.
- if client_cert:
- self._keychain, self._keychain_dir = _temporary_keychain()
- self._client_cert_chain = _load_client_cert_chain(
- self._keychain, client_cert, client_key
- )
- result = Security.SSLSetCertificate(
- self.context, self._client_cert_chain
- )
- _assert_no_error(result)
-
- while True:
- with self._raise_on_error():
- result = Security.SSLHandshake(self.context)
-
- if result == SecurityConst.errSSLWouldBlock:
- raise socket.timeout("handshake timed out")
- elif result == SecurityConst.errSSLServerAuthCompleted:
- self._custom_validate(verify, trust_bundle)
- continue
- else:
- _assert_no_error(result)
- break
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, bufsiz):
- buffer = ctypes.create_string_buffer(bufsiz)
- bytes_read = self.recv_into(buffer, bufsiz)
- data = buffer[:bytes_read]
- return data
-
- def recv_into(self, buffer, nbytes=None):
- # Read short on EOF.
- if self._closed:
- return 0
-
- if nbytes is None:
- nbytes = len(buffer)
-
- buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLRead(
- self.context, buffer, nbytes, ctypes.byref(processed_bytes)
- )
-
- # There are some result codes that we want to treat as "not always
- # errors". Specifically, those are errSSLWouldBlock,
- # errSSLClosedGraceful, and errSSLClosedNoNotify.
- if (result == SecurityConst.errSSLWouldBlock):
- # If we didn't process any bytes, then this was just a time out.
- # However, we can get errSSLWouldBlock in situations when we *did*
- # read some data, and in those cases we should just read "short"
- # and return.
- if processed_bytes.value == 0:
- # Timed out, no data read.
- raise socket.timeout("recv timed out")
- elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
- # The remote peer has closed this connection. We should do so as
- # well. Note that we don't actually return here because in
- # principle this could actually be fired along with return data.
- # It's unlikely though.
- self.close()
- else:
- _assert_no_error(result)
-
- # Ok, we read and probably succeeded. We should return whatever data
- # was actually read.
- return processed_bytes.value
-
- def settimeout(self, timeout):
- self._timeout = timeout
-
- def gettimeout(self):
- return self._timeout
-
- def send(self, data):
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLWrite(
- self.context, data, len(data), ctypes.byref(processed_bytes)
- )
-
- if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
- # Timed out
- raise socket.timeout("send timed out")
- else:
- _assert_no_error(result)
-
- # We sent, and probably succeeded. Tell them how much we sent.
- return processed_bytes.value
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
-
- def shutdown(self):
- with self._raise_on_error():
- Security.SSLClose(self.context)
-
- def close(self):
- # TODO: should I do clean shutdown here? Do I have to?
- if self._makefile_refs < 1:
- self._closed = True
- if self.context:
- CoreFoundation.CFRelease(self.context)
- self.context = None
- if self._client_cert_chain:
- CoreFoundation.CFRelease(self._client_cert_chain)
- self._client_cert_chain = None
- if self._keychain:
- Security.SecKeychainDelete(self._keychain)
- CoreFoundation.CFRelease(self._keychain)
- shutil.rmtree(self._keychain_dir)
- self._keychain = self._keychain_dir = None
- return self.socket.close()
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- # Urgh, annoying.
- #
- # Here's how we do this:
- #
- # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
- # connection.
- # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
- # 3. To get the CN, call SecCertificateCopyCommonName and process that
- # string so that it's of the appropriate type.
- # 4. To get the SAN, we need to do something a bit more complex:
- # a. Call SecCertificateCopyValues to get the data, requesting
- # kSecOIDSubjectAltName.
- # b. Mess about with this dictionary to try to get the SANs out.
- #
- # This is gross. Really gross. It's going to be a few hundred LoC extra
- # just to repeat something that SecureTransport can *already do*. So my
- # operating assumption at this time is that what we want to do is
- # instead to just flag to urllib3 that it shouldn't do its own hostname
- # validation when using SecureTransport.
- if not binary_form:
- raise ValueError(
- "SecureTransport only supports dumping binary certs"
- )
- trust = Security.SecTrustRef()
- certdata = None
- der_bytes = None
-
- try:
- # Grab the trust store.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- # Probably we haven't done the handshake yet. No biggie.
- return None
-
- cert_count = Security.SecTrustGetCertificateCount(trust)
- if not cert_count:
- # Also a case that might happen if we haven't handshaked.
- # Handshook? Handshaken?
- return None
-
- leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
- assert leaf
-
- # Ok, now we want the DER bytes.
- certdata = Security.SecCertificateCopyData(leaf)
- assert certdata
-
- data_length = CoreFoundation.CFDataGetLength(certdata)
- data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
- der_bytes = ctypes.string_at(data_buffer, data_length)
- finally:
- if certdata:
- CoreFoundation.CFRelease(certdata)
- if trust:
- CoreFoundation.CFRelease(trust)
-
- return der_bytes
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- def makefile(self, mode="r", buffering=None, *args, **kwargs):
- # We disable buffering with SecureTransport because it conflicts with
- # the buffering that ST does internally (see issue #1153 for more).
- buffering = 0
- return backport_makefile(self, mode, buffering, *args, **kwargs)
-
-WrappedSocket.makefile = makefile
-
-
-class SecureTransportContext(object):
- """
- I am a wrapper class for the SecureTransport library, to translate the
- interface of the standard library ``SSLContext`` object to calls into
- SecureTransport.
- """
- def __init__(self, protocol):
- self._min_version, self._max_version = _protocol_to_min_max[protocol]
- self._options = 0
- self._verify = False
- self._trust_bundle = None
- self._client_cert = None
- self._client_key = None
- self._client_key_passphrase = None
-
- @property
- def check_hostname(self):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- return True
-
- @check_hostname.setter
- def check_hostname(self, value):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- pass
-
- @property
- def options(self):
- # TODO: Well, crap.
- #
- # So this is the bit of the code that is the most likely to cause us
- # trouble. Essentially we need to enumerate all of the SSL options that
- # users might want to use and try to see if we can sensibly translate
- # them, or whether we should just ignore them.
- return self._options
-
- @options.setter
- def options(self, value):
- # TODO: Update in line with above.
- self._options = value
-
- @property
- def verify_mode(self):
- return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._verify = True if value == ssl.CERT_REQUIRED else False
-
- def set_default_verify_paths(self):
- # So, this has to do something a bit weird. Specifically, what it does
- # is nothing.
- #
- # This means that, if load_verify_locations was called earlier, this
- # method does not undo that. We behave this way because the rest of the
- # urllib3 code will attempt to load the default verify paths if it
- # hasn't been told about any paths, even if the context itself was
- # configured sometime earlier. We resolve that by just ignoring it.
- pass
-
- def load_default_certs(self):
- return self.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- # For now, we just require the default cipher string.
- if ciphers != util.ssl_.DEFAULT_CIPHERS:
- raise ValueError(
- "SecureTransport doesn't support custom cipher strings"
- )
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- # OK, we only really support cadata and cafile.
- if capath is not None:
- raise ValueError(
- "SecureTransport does not support cert directories"
- )
-
- self._trust_bundle = cafile or cadata
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._client_cert = certfile
- self._client_key = keyfile
- self._client_key_passphrase = password
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- # So, what do we do here? Firstly, we assert some properties. This is a
- # stripped down shim, so there is some functionality we don't support.
- # See PEP 543 for the real deal.
- assert not server_side
- assert do_handshake_on_connect
- assert suppress_ragged_eofs
-
- # Ok, we're good to go. Now we want to create the wrapped socket object
- # and store it in the appropriate place.
- wrapped_socket = WrappedSocket(sock)
-
- # Now we can handshake
- wrapped_socket.handshake(
- server_hostname, self._verify, self._trust_bundle,
- self._min_version, self._max_version, self._client_cert,
- self._client_key, self._client_key_passphrase
- )
- return wrapped_socket
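A minimal usage sketch for the wrapper above, assuming the canonical upstream import path
(urllib3.contrib.securetransport) and its inject_into_urllib3() helper; in this vendored
tree the package lives under python_modules, so the paths are illustrative only.

    # Hedged sketch: swap SecureTransportContext in for ssl.SSLContext, then use urllib3
    # normally. Only meaningful on macOS, where SecureTransport is available.
    import urllib3
    from urllib3.contrib import securetransport

    securetransport.inject_into_urllib3()   # monkey-patches urllib3's SSL context class
    http = urllib3.PoolManager()
    resp = http.request('GET', 'https://example.org/')
    print(resp.status)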
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/python.d/python_modules/urllib3/contrib/socks.py
deleted file mode 100644
index 39e92fde1..000000000
--- a/python.d/python_modules/urllib3/contrib/socks.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
-SOCKS5. To enable its functionality, either install PySocks or install this
-module with the ``socks`` extra.
-
-The SOCKS implementation supports the full range of urllib3 features. It also
-supports the following SOCKS features:
-
-- SOCKS4
-- SOCKS4a
-- SOCKS5
-- Usernames and passwords for the SOCKS proxy
-
-Known Limitations:
-
-- Currently PySocks does not support contacting remote websites via literal
- IPv6 addresses. Any such connection attempt will fail. You must use a domain
- name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
- such connection attempt will fail.
-"""
-from __future__ import absolute_import
-
-try:
- import socks
-except ImportError:
- import warnings
- from ..exceptions import DependencyWarning
-
- warnings.warn((
- 'SOCKS support in urllib3 requires the installation of optional '
- 'dependencies: specifically, PySocks. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
- ),
- DependencyWarning
- )
- raise
-
-from socket import error as SocketError, timeout as SocketTimeout
-
-from ..connection import (
- HTTPConnection, HTTPSConnection
-)
-from ..connectionpool import (
- HTTPConnectionPool, HTTPSConnectionPool
-)
-from ..exceptions import ConnectTimeoutError, NewConnectionError
-from ..poolmanager import PoolManager
-from ..util.url import parse_url
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-
-class SOCKSConnection(HTTPConnection):
- """
- A plain-text HTTP connection that connects via a SOCKS proxy.
- """
- def __init__(self, *args, **kwargs):
- self._socks_options = kwargs.pop('_socks_options')
- super(SOCKSConnection, self).__init__(*args, **kwargs)
-
- def _new_conn(self):
- """
- Establish a new connection via the SOCKS proxy.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = socks.create_connection(
- (self.host, self.port),
- proxy_type=self._socks_options['socks_version'],
- proxy_addr=self._socks_options['proxy_host'],
- proxy_port=self._socks_options['proxy_port'],
- proxy_username=self._socks_options['username'],
- proxy_password=self._socks_options['password'],
- proxy_rdns=self._socks_options['rdns'],
- timeout=self.timeout,
- **extra_kw
- )
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except socks.ProxyError as e:
- # This is fragile as hell, but it seems to be the only way to raise
- # useful errors here.
- if e.socket_err:
- error = e.socket_err
- if isinstance(error, SocketTimeout):
- raise ConnectTimeoutError(
- self,
- "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout)
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % error
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % e
- )
-
- except SocketError as e: # Defensive: PySocks should catch all these.
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
-
-# We don't need to duplicate the Verified/Unverified distinction from
-# urllib3/connection.py here because the HTTPSConnection will already have been
-# correctly set to either the Verified or Unverified form by that module. This
-# means the SOCKSHTTPSConnection will automatically be the correct type.
-class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
- pass
-
-
-class SOCKSHTTPConnectionPool(HTTPConnectionPool):
- ConnectionCls = SOCKSConnection
-
-
-class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
- ConnectionCls = SOCKSHTTPSConnection
-
-
-class SOCKSProxyManager(PoolManager):
- """
- A version of the urllib3 ProxyManager that routes connections via the
- defined SOCKS proxy.
- """
- pool_classes_by_scheme = {
- 'http': SOCKSHTTPConnectionPool,
- 'https': SOCKSHTTPSConnectionPool,
- }
-
- def __init__(self, proxy_url, username=None, password=None,
- num_pools=10, headers=None, **connection_pool_kw):
- parsed = parse_url(proxy_url)
-
- if parsed.scheme == 'socks5':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = False
- elif parsed.scheme == 'socks5h':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = True
- elif parsed.scheme == 'socks4':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = False
- elif parsed.scheme == 'socks4a':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = True
- else:
- raise ValueError(
- "Unable to determine SOCKS version from %s" % proxy_url
- )
-
- self.proxy_url = proxy_url
-
- socks_options = {
- 'socks_version': socks_version,
- 'proxy_host': parsed.host,
- 'proxy_port': parsed.port,
- 'username': username,
- 'password': password,
- 'rdns': rdns
- }
- connection_pool_kw['_socks_options'] = socks_options
-
- super(SOCKSProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw
- )
-
- self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
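A minimal usage sketch of the SOCKSProxyManager defined above; the proxy address and
target URL are placeholders, and the import path assumes the canonical upstream layout.

    # Hedged sketch: 'socks5h://' resolves hostnames through the proxy (rdns=True),
    # while 'socks5://' resolves them locally, matching the scheme handling above.
    from urllib3.contrib.socks import SOCKSProxyManager

    proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/',
                              username='user', password='secret')
    resp = proxy.request('GET', 'http://example.org/')
    print(resp.status, len(resp.data))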
diff --git a/python.d/python_modules/urllib3/exceptions.py b/python.d/python_modules/urllib3/exceptions.py
deleted file mode 100644
index 6c4be5810..000000000
--- a/python.d/python_modules/urllib3/exceptions.py
+++ /dev/null
@@ -1,246 +0,0 @@
-from __future__ import absolute_import
-from .packages.six.moves.http_client import (
- IncompleteRead as httplib_IncompleteRead
-)
-# Base Exceptions
-
-
-class HTTPError(Exception):
- "Base exception used by this module."
- pass
-
-
-class HTTPWarning(Warning):
- "Base warning used by this module."
- pass
-
-
-class PoolError(HTTPError):
- "Base exception for errors caused within a pool."
- def __init__(self, pool, message):
- self.pool = pool
- HTTPError.__init__(self, "%s: %s" % (pool, message))
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, None)
-
-
-class RequestError(PoolError):
- "Base exception for PoolErrors that have associated URLs."
- def __init__(self, pool, url, message):
- self.url = url
- PoolError.__init__(self, pool, message)
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, self.url, None)
-
-
-class SSLError(HTTPError):
- "Raised when SSL certificate fails in an HTTPS connection."
- pass
-
-
-class ProxyError(HTTPError):
- "Raised when the connection to a proxy fails."
- pass
-
-
-class DecodeError(HTTPError):
- "Raised when automatic decoding based on Content-Type fails."
- pass
-
-
-class ProtocolError(HTTPError):
- "Raised when something unexpected happens mid-request/response."
- pass
-
-
-#: Renamed to ProtocolError but aliased for backwards compatibility.
-ConnectionError = ProtocolError
-
-
-# Leaf Exceptions
-
-class MaxRetryError(RequestError):
- """Raised when the maximum number of retries is exceeded.
-
- :param pool: The connection pool
- :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
- :param string url: The requested URL
- :param exceptions.Exception reason: The underlying error
-
- """
-
- def __init__(self, pool, url, reason=None):
- self.reason = reason
-
- message = "Max retries exceeded with url: %s (Caused by %r)" % (
- url, reason)
-
- RequestError.__init__(self, pool, url, message)
-
-
-class HostChangedError(RequestError):
- "Raised when an existing pool gets a request for a foreign host."
-
- def __init__(self, pool, url, retries=3):
- message = "Tried to open a foreign host with url: %s" % url
- RequestError.__init__(self, pool, url, message)
- self.retries = retries
-
-
-class TimeoutStateError(HTTPError):
- """ Raised when passing an invalid state to a timeout """
- pass
-
-
-class TimeoutError(HTTPError):
- """ Raised when a socket timeout error occurs.
-
- Catching this error will catch both :exc:`ReadTimeoutErrors
- <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
- """
- pass
-
-
-class ReadTimeoutError(TimeoutError, RequestError):
- "Raised when a socket timeout occurs while receiving data from a server"
- pass
-
-
-# This timeout error does not have a URL attached and needs to inherit from the
-# base HTTPError
-class ConnectTimeoutError(TimeoutError):
- "Raised when a socket timeout occurs while connecting to a server"
- pass
-
-
-class NewConnectionError(ConnectTimeoutError, PoolError):
- "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
- pass
-
-
-class EmptyPoolError(PoolError):
- "Raised when a pool runs out of connections and no more are allowed."
- pass
-
-
-class ClosedPoolError(PoolError):
- "Raised when a request enters a pool after the pool has been closed."
- pass
-
-
-class LocationValueError(ValueError, HTTPError):
- "Raised when there is something wrong with a given URL input."
- pass
-
-
-class LocationParseError(LocationValueError):
- "Raised when get_host or similar fails to parse the URL input."
-
- def __init__(self, location):
- message = "Failed to parse: %s" % location
- HTTPError.__init__(self, message)
-
- self.location = location
-
-
-class ResponseError(HTTPError):
- "Used as a container for an error reason supplied in a MaxRetryError."
- GENERIC_ERROR = 'too many error responses'
- SPECIFIC_ERROR = 'too many {status_code} error responses'
-
-
-class SecurityWarning(HTTPWarning):
- "Warned when perfoming security reducing actions"
- pass
-
-
-class SubjectAltNameWarning(SecurityWarning):
- "Warned when connecting to a host with a certificate missing a SAN."
- pass
-
-
-class InsecureRequestWarning(SecurityWarning):
- "Warned when making an unverified HTTPS request."
- pass
-
-
-class SystemTimeWarning(SecurityWarning):
- "Warned when system time is suspected to be wrong"
- pass
-
-
-class InsecurePlatformWarning(SecurityWarning):
- "Warned when certain SSL configuration is not available on a platform."
- pass
-
-
-class SNIMissingWarning(HTTPWarning):
- "Warned when making a HTTPS request without SNI available."
- pass
-
-
-class DependencyWarning(HTTPWarning):
- """
- Warned when an attempt is made to import a module with missing optional
- dependencies.
- """
- pass
-
-
-class ResponseNotChunked(ProtocolError, ValueError):
- "Response needs to be chunked in order to read it as chunks."
- pass
-
-
-class BodyNotHttplibCompatible(HTTPError):
- """
- Body should be httplib.HTTPResponse-like (have an fp attribute which
- returns raw chunks) for read_chunked().
- """
- pass
-
-
-class IncompleteRead(HTTPError, httplib_IncompleteRead):
- """
- Response length doesn't match expected Content-Length
-
- Subclass of http_client.IncompleteRead to allow int value
- for `partial` to avoid creating large objects on streamed
- reads.
- """
- def __init__(self, partial, expected):
- super(IncompleteRead, self).__init__(partial, expected)
-
- def __repr__(self):
- return ('IncompleteRead(%i bytes read, '
- '%i more expected)' % (self.partial, self.expected))
-
-
-class InvalidHeader(HTTPError):
- "The header provided was somehow invalid."
- pass
-
-
-class ProxySchemeUnknown(AssertionError, ValueError):
- "ProxyManager does not support the supplied scheme"
- # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
-
- def __init__(self, scheme):
- message = "Not supported proxy scheme %s" % scheme
- super(ProxySchemeUnknown, self).__init__(message)
-
-
-class HeaderParsingError(HTTPError):
- "Raised by assert_header_parsing, but we convert it to a log.warning statement."
- def __init__(self, defects, unparsed_data):
- message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
- super(HeaderParsingError, self).__init__(message)
-
-
-class UnrewindableBodyError(HTTPError):
- "urllib3 encountered an error when trying to rewind a body"
- pass
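A short sketch of how the hierarchy above is typically consumed; the unroutable address
and timeout values are placeholders. TimeoutError covers both ConnectTimeoutError and
ReadTimeoutError, and MaxRetryError exposes the underlying cause via its reason attribute.

    # Hedged sketch, assuming the canonical urllib3 import path.
    import urllib3
    from urllib3.exceptions import MaxRetryError, TimeoutError as U3TimeoutError

    http = urllib3.PoolManager()
    try:
        # retries=False lets timeout errors propagate instead of being wrapped
        http.request('GET', 'http://10.255.255.1/', timeout=0.25, retries=False)
    except U3TimeoutError:
        print('connect or read timeout')
    except MaxRetryError as exc:
        print('gave up after retries, caused by:', exc.reason)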
diff --git a/python.d/python_modules/urllib3/fields.py b/python.d/python_modules/urllib3/fields.py
deleted file mode 100644
index 19b0ae0c8..000000000
--- a/python.d/python_modules/urllib3/fields.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from __future__ import absolute_import
-import email.utils
-import mimetypes
-
-from .packages import six
-
-
-def guess_content_type(filename, default='application/octet-stream'):
- """
- Guess the "Content-Type" of a file.
-
- :param filename:
- The filename to guess the "Content-Type" of using :mod:`mimetypes`.
- :param default:
- If no "Content-Type" can be guessed, default to `default`.
- """
- if filename:
- return mimetypes.guess_type(filename)[0] or default
- return default
-
-
-def format_header_param(name, value):
- """
- Helper function to format and quote a single header parameter.
-
- Particularly useful for header parameters which might contain
- non-ASCII values, like file names. This follows RFC 2231, as
- suggested by RFC 2388 Section 4.4.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- if not any(ch in value for ch in '"\\\r\n'):
- result = '%s="%s"' % (name, value)
- try:
- result.encode('ascii')
- except (UnicodeEncodeError, UnicodeDecodeError):
- pass
- else:
- return result
- if not six.PY3 and isinstance(value, six.text_type): # Python 2:
- value = value.encode('utf-8')
- value = email.utils.encode_rfc2231(value, 'utf-8')
- value = '%s*=%s' % (name, value)
- return value
-
-
-class RequestField(object):
- """
- A data container for request body parameters.
-
- :param name:
- The name of this request field.
- :param data:
- The data/value body.
- :param filename:
- An optional filename of the request field.
- :param headers:
- An optional dict-like object of headers to initially use for the field.
- """
- def __init__(self, name, data, filename=None, headers=None):
- self._name = name
- self._filename = filename
- self.data = data
- self.headers = {}
- if headers:
- self.headers = dict(headers)
-
- @classmethod
- def from_tuples(cls, fieldname, value):
- """
- A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
-
- Supports constructing :class:`~urllib3.fields.RequestField` from
- parameter of key/value strings AND key/filetuple. A filetuple is a
- (filename, data, MIME type) tuple where the MIME type is optional.
- For example::
-
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
-
- Field names and filenames must be unicode.
- """
- if isinstance(value, tuple):
- if len(value) == 3:
- filename, data, content_type = value
- else:
- filename, data = value
- content_type = guess_content_type(filename)
- else:
- filename = None
- content_type = None
- data = value
-
- request_param = cls(fieldname, data, filename=filename)
- request_param.make_multipart(content_type=content_type)
-
- return request_param
-
- def _render_part(self, name, value):
- """
- Overridable helper function to format a single header parameter.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- return format_header_param(name, value)
-
- def _render_parts(self, header_parts):
- """
- Helper function to format and quote a single header.
-
- Useful for single headers that are composed of multiple items. E.g.,
- 'Content-Disposition' fields.
-
- :param header_parts:
- A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
- as `k1="v1"; k2="v2"; ...`.
- """
- parts = []
- iterable = header_parts
- if isinstance(header_parts, dict):
- iterable = header_parts.items()
-
- for name, value in iterable:
- if value is not None:
- parts.append(self._render_part(name, value))
-
- return '; '.join(parts)
-
- def render_headers(self):
- """
- Renders the headers for this request field.
- """
- lines = []
-
- sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
- for sort_key in sort_keys:
- if self.headers.get(sort_key, False):
- lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
-
- for header_name, header_value in self.headers.items():
- if header_name not in sort_keys:
- if header_value:
- lines.append('%s: %s' % (header_name, header_value))
-
- lines.append('\r\n')
- return '\r\n'.join(lines)
-
- def make_multipart(self, content_disposition=None, content_type=None,
- content_location=None):
- """
- Makes this request field into a multipart request field.
-
- This method overrides "Content-Disposition", "Content-Type" and
- "Content-Location" headers to the request parameter.
-
- :param content_type:
- The 'Content-Type' of the request body.
- :param content_location:
- The 'Content-Location' of the request body.
-
- """
- self.headers['Content-Disposition'] = content_disposition or 'form-data'
- self.headers['Content-Disposition'] += '; '.join([
- '', self._render_parts(
- (('name', self._name), ('filename', self._filename))
- )
- ])
- self.headers['Content-Type'] = content_type
- self.headers['Content-Location'] = content_location
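A short sketch of the RequestField API above; the field name, filename and payload are
made up. from_tuples() accepts either a plain value or a (filename, data[, content_type])
tuple, and render_headers() emits the per-part MIME headers.

    # Hedged sketch, assuming the canonical urllib3 import path.
    from urllib3.fields import RequestField

    field = RequestField.from_tuples('upload', ('report.txt', 'hello world', 'text/plain'))
    print(field.render_headers())
    # Content-Disposition: form-data; name="upload"; filename="report.txt"
    # Content-Type: text/plain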
diff --git a/python.d/python_modules/urllib3/filepost.py b/python.d/python_modules/urllib3/filepost.py
deleted file mode 100644
index cd11cee46..000000000
--- a/python.d/python_modules/urllib3/filepost.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import absolute_import
-import codecs
-
-from uuid import uuid4
-from io import BytesIO
-
-from .packages import six
-from .packages.six import b
-from .fields import RequestField
-
-writer = codecs.lookup('utf-8')[3]
-
-
-def choose_boundary():
- """
- Our embarrassingly-simple replacement for mimetools.choose_boundary.
- """
- return uuid4().hex
-
-
-def iter_field_objects(fields):
- """
- Iterate over fields.
-
- Supports list of (k, v) tuples and dicts, and lists of
- :class:`~urllib3.fields.RequestField`.
-
- """
- if isinstance(fields, dict):
- i = six.iteritems(fields)
- else:
- i = iter(fields)
-
- for field in i:
- if isinstance(field, RequestField):
- yield field
- else:
- yield RequestField.from_tuples(*field)
-
-
-def iter_fields(fields):
- """
- .. deprecated:: 1.6
-
- Iterate over fields.
-
- The addition of :class:`~urllib3.fields.RequestField` makes this function
- obsolete. Instead, use :func:`iter_field_objects`, which returns
- :class:`~urllib3.fields.RequestField` objects.
-
- Supports list of (k, v) tuples and dicts.
- """
- if isinstance(fields, dict):
- return ((k, v) for k, v in six.iteritems(fields))
-
- return ((k, v) for k, v in fields)
-
-
-def encode_multipart_formdata(fields, boundary=None):
- """
- Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
-
- :param fields:
- Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
-
- :param boundary:
- If not specified, then a random boundary will be generated using
- :func:`choose_boundary` (a :func:`uuid.uuid4` hex string).
- """
- body = BytesIO()
- if boundary is None:
- boundary = choose_boundary()
-
- for field in iter_field_objects(fields):
- body.write(b('--%s\r\n' % (boundary)))
-
- writer(body).write(field.render_headers())
- data = field.data
-
- if isinstance(data, int):
- data = str(data) # Backwards compatibility
-
- if isinstance(data, six.text_type):
- writer(body).write(data)
- else:
- body.write(data)
-
- body.write(b'\r\n')
-
- body.write(b('--%s--\r\n' % (boundary)))
-
- content_type = str('multipart/form-data; boundary=%s' % boundary)
-
- return body.getvalue(), content_type
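A short sketch of encode_multipart_formdata() above; the field names and contents are
placeholders. The function returns the encoded body together with a Content-Type header
value that carries the generated boundary.

    # Hedged sketch, assuming the canonical urllib3 import path.
    from urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata({
        'comment': 'hello',
        'attachment': ('notes.txt', 'first line\nsecond line', 'text/plain'),
    })
    print(content_type)           # multipart/form-data; boundary=<random hex>
    print(body.decode('utf-8'))   # the rendered multipart body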
diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/python.d/python_modules/urllib3/packages/__init__.py
deleted file mode 100644
index 170e974c1..000000000
--- a/python.d/python_modules/urllib3/packages/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import absolute_import
-
-from . import ssl_match_hostname
-
-__all__ = ('ssl_match_hostname', )
diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/python.d/python_modules/urllib3/packages/backports/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/python.d/python_modules/urllib3/packages/backports/__init__.py
+++ /dev/null
diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/python.d/python_modules/urllib3/packages/backports/makefile.py
deleted file mode 100644
index 75b80dcf8..000000000
--- a/python.d/python_modules/urllib3/packages/backports/makefile.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-backports.makefile
-~~~~~~~~~~~~~~~~~~
-
-Backports the Python 3 ``socket.makefile`` method for use with anything that
-wants to create a "fake" socket object.
-"""
-import io
-
-from socket import SocketIO
-
-
-def backport_makefile(self, mode="r", buffering=None, encoding=None,
- errors=None, newline=None):
- """
- Backport of ``socket.makefile`` from Python 3.5.
- """
- if not set(mode) <= set(["r", "w", "b"]):
- raise ValueError(
- "invalid mode %r (only r, w, b allowed)" % (mode,)
- )
- writing = "w" in mode
- reading = "r" in mode or not writing
- assert reading or writing
- binary = "b" in mode
- rawmode = ""
- if reading:
- rawmode += "r"
- if writing:
- rawmode += "w"
- raw = SocketIO(self, rawmode)
- self._makefile_refs += 1
- if buffering is None:
- buffering = -1
- if buffering < 0:
- buffering = io.DEFAULT_BUFFER_SIZE
- if buffering == 0:
- if not binary:
- raise ValueError("unbuffered streams must be binary")
- return raw
- if reading and writing:
- buffer = io.BufferedRWPair(raw, raw, buffering)
- elif reading:
- buffer = io.BufferedReader(raw, buffering)
- else:
- assert writing
- buffer = io.BufferedWriter(raw, buffering)
- if binary:
- return buffer
- text = io.TextIOWrapper(buffer, encoding, errors, newline)
- text.mode = mode
- return text
diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/python.d/python_modules/urllib3/packages/ordered_dict.py
deleted file mode 100644
index 4479363cc..000000000
--- a/python.d/python_modules/urllib3/packages/ordered_dict.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
-# Passes Python2.7's test suite and incorporates all the latest updates.
-# Copyright 2009 Raymond Hettinger, released under the MIT License.
-# http://code.activestate.com/recipes/576693/
-try:
- from thread import get_ident as _get_ident
-except ImportError:
- from dummy_thread import get_ident as _get_ident
-
-try:
- from _abcoll import KeysView, ValuesView, ItemsView
-except ImportError:
- pass
-
-
-class OrderedDict(dict):
- 'Dictionary that remembers insertion order'
- # An inherited dict maps keys to values.
- # The inherited dict provides __getitem__, __len__, __contains__, and get.
- # The remaining methods are order-aware.
- # Big-O running times for all methods are the same as for regular dictionaries.
-
- # The internal self.__map dictionary maps keys to links in a doubly linked list.
- # The circular doubly linked list starts and ends with a sentinel element.
- # The sentinel element never gets deleted (this simplifies the algorithm).
- # Each link is stored as a list of length three: [PREV, NEXT, KEY].
-
- def __init__(self, *args, **kwds):
- '''Initialize an ordered dictionary. Signature is the same as for
- regular dictionaries, but keyword arguments are not recommended
- because their insertion order is arbitrary.
-
- '''
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__root
- except AttributeError:
- self.__root = root = [] # sentinel node
- root[:] = [root, root, None]
- self.__map = {}
- self.__update(*args, **kwds)
-
- def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
- 'od.__setitem__(i, y) <==> od[i]=y'
- # Setting a new item creates a new link which goes at the end of the linked
- # list, and the inherited dictionary is updated with the new key/value pair.
- if key not in self:
- root = self.__root
- last = root[0]
- last[1] = root[0] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
-
- def __delitem__(self, key, dict_delitem=dict.__delitem__):
- 'od.__delitem__(y) <==> del od[y]'
- # Deleting an existing item uses self.__map to find the link which is
- # then removed by updating the links in the predecessor and successor nodes.
- dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[1] = link_next
- link_next[0] = link_prev
-
- def __iter__(self):
- 'od.__iter__() <==> iter(od)'
- root = self.__root
- curr = root[1]
- while curr is not root:
- yield curr[2]
- curr = curr[1]
-
- def __reversed__(self):
- 'od.__reversed__() <==> reversed(od)'
- root = self.__root
- curr = root[0]
- while curr is not root:
- yield curr[2]
- curr = curr[0]
-
- def clear(self):
- 'od.clear() -> None. Remove all items from od.'
- try:
- for node in self.__map.itervalues():
- del node[:]
- root = self.__root
- root[:] = [root, root, None]
- self.__map.clear()
- except AttributeError:
- pass
- dict.clear(self)
-
- def popitem(self, last=True):
- '''od.popitem() -> (k, v), return and remove a (key, value) pair.
- Pairs are returned in LIFO order if last is true or FIFO order if false.
-
- '''
- if not self:
- raise KeyError('dictionary is empty')
- root = self.__root
- if last:
- link = root[0]
- link_prev = link[0]
- link_prev[1] = root
- root[0] = link_prev
- else:
- link = root[1]
- link_next = link[1]
- root[1] = link_next
- link_next[0] = root
- key = link[2]
- del self.__map[key]
- value = dict.pop(self, key)
- return key, value
-
- # -- the following methods do not depend on the internal structure --
-
- def keys(self):
- 'od.keys() -> list of keys in od'
- return list(self)
-
- def values(self):
- 'od.values() -> list of values in od'
- return [self[key] for key in self]
-
- def items(self):
- 'od.items() -> list of (key, value) pairs in od'
- return [(key, self[key]) for key in self]
-
- def iterkeys(self):
- 'od.iterkeys() -> an iterator over the keys in od'
- return iter(self)
-
- def itervalues(self):
- 'od.itervalues -> an iterator over the values in od'
- for k in self:
- yield self[k]
-
- def iteritems(self):
- 'od.iteritems -> an iterator over the (key, value) items in od'
- for k in self:
- yield (k, self[k])
-
- def update(*args, **kwds):
- '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
-
- If E is a dict instance, does: for k in E: od[k] = E[k]
- If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
- Or if E is an iterable of items, does: for k, v in E: od[k] = v
- In either case, this is followed by: for k, v in F.items(): od[k] = v
-
- '''
- if len(args) > 2:
- raise TypeError('update() takes at most 2 positional '
- 'arguments (%d given)' % (len(args),))
- elif not args:
- raise TypeError('update() takes at least 1 argument (0 given)')
- self = args[0]
- # Make progressively weaker assumptions about "other"
- other = ()
- if len(args) == 2:
- other = args[1]
- if isinstance(other, dict):
- for key in other:
- self[key] = other[key]
- elif hasattr(other, 'keys'):
- for key in other.keys():
- self[key] = other[key]
- else:
- for key, value in other:
- self[key] = value
- for key, value in kwds.items():
- self[key] = value
-
- __update = update # let subclasses override update without breaking __init__
-
- __marker = object()
-
- def pop(self, key, default=__marker):
- '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
-
- '''
- if key in self:
- result = self[key]
- del self[key]
- return result
- if default is self.__marker:
- raise KeyError(key)
- return default
-
- def setdefault(self, key, default=None):
- 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
- if key in self:
- return self[key]
- self[key] = default
- return default
-
- def __repr__(self, _repr_running={}):
- 'od.__repr__() <==> repr(od)'
- call_key = id(self), _get_ident()
- if call_key in _repr_running:
- return '...'
- _repr_running[call_key] = 1
- try:
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
- finally:
- del _repr_running[call_key]
-
- def __reduce__(self):
- 'Return state information for pickling'
- items = [[k, self[k]] for k in self]
- inst_dict = vars(self).copy()
- for k in vars(OrderedDict()):
- inst_dict.pop(k, None)
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def copy(self):
- 'od.copy() -> a shallow copy of od'
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
- and values equal to v (which defaults to None).
-
- '''
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
- while comparison to a regular mapping is order-insensitive.
-
- '''
- if isinstance(other, OrderedDict):
- return len(self)==len(other) and self.items() == other.items()
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
-
- # -- the following methods are only used in Python 2.7 --
-
- def viewkeys(self):
- "od.viewkeys() -> a set-like object providing a view on od's keys"
- return KeysView(self)
-
- def viewvalues(self):
- "od.viewvalues() -> an object providing a view on od's values"
- return ValuesView(self)
-
- def viewitems(self):
- "od.viewitems() -> a set-like object providing a view on od's items"
- return ItemsView(self)
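A short sketch of the behaviour the backport above provides; the stdlib class is imported
here only so the snippet runs on Python 3, where this backport is not needed.

    # Hedged sketch: insertion order is preserved and popitem() is LIFO by default.
    from collections import OrderedDict

    od = OrderedDict()
    od['b'] = 1
    od['a'] = 2
    od['c'] = 3
    print(list(od))                 # ['b', 'a', 'c']
    print(od.popitem())             # ('c', 3)
    print(od.popitem(last=False))   # ('b', 1)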
diff --git a/python.d/python_modules/urllib3/packages/six.py b/python.d/python_modules/urllib3/packages/six.py
deleted file mode 100644
index 190c0239c..000000000
--- a/python.d/python_modules/urllib3/packages/six.py
+++ /dev/null
@@ -1,868 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-from __future__ import absolute_import
-
-import functools
-import itertools
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.10.0"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-PY34 = sys.version_info[0:2] >= (3, 4)
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- if sys.platform.startswith("java"):
- # Jython always uses 32 bits.
- MAXSIZE = int((1 << 31) - 1)
- else:
- # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object):
-
- def __len__(self):
- return 1 << 31
- try:
- len(X())
- except OverflowError:
- # 32-bit
- MAXSIZE = int((1 << 31) - 1)
- else:
- # 64-bit
- MAXSIZE = int((1 << 63) - 1)
- del X
-
-
-def _add_doc(func, doc):
- """Add documentation to a function."""
- func.__doc__ = doc
-
-
-def _import_module(name):
- """Import module, returning the module after the last dot."""
- __import__(name)
- return sys.modules[name]
-
-
-class _LazyDescr(object):
-
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, tp):
- result = self._resolve()
- setattr(obj, self.name, result) # Invokes __set__.
- try:
- # This is a bit ugly, but it avoids running this again by
- # removing this descriptor.
- delattr(obj.__class__, self.name)
- except AttributeError:
- pass
- return result
-
-
-class MovedModule(_LazyDescr):
-
- def __init__(self, name, old, new=None):
- super(MovedModule, self).__init__(name)
- if PY3:
- if new is None:
- new = name
- self.mod = new
- else:
- self.mod = old
-
- def _resolve(self):
- return _import_module(self.mod)
-
- def __getattr__(self, attr):
- _module = self._resolve()
- value = getattr(_module, attr)
- setattr(self, attr, value)
- return value
-
-
-class _LazyModule(types.ModuleType):
-
- def __init__(self, name):
- super(_LazyModule, self).__init__(name)
- self.__doc__ = self.__class__.__doc__
-
- def __dir__(self):
- attrs = ["__doc__", "__name__"]
- attrs += [attr.name for attr in self._moved_attributes]
- return attrs
-
- # Subclasses should override this
- _moved_attributes = []
-
-
-class MovedAttribute(_LazyDescr):
-
- def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
- super(MovedAttribute, self).__init__(name)
- if PY3:
- if new_mod is None:
- new_mod = name
- self.mod = new_mod
- if new_attr is None:
- if old_attr is None:
- new_attr = name
- else:
- new_attr = old_attr
- self.attr = new_attr
- else:
- self.mod = old_mod
- if old_attr is None:
- old_attr = name
- self.attr = old_attr
-
- def _resolve(self):
- module = _import_module(self.mod)
- return getattr(module, self.attr)
-
-
-class _SixMetaPathImporter(object):
-
- """
- A meta path importer to import six.moves and its submodules.
-
- This class implements a PEP302 finder and loader. It should be compatible
- with Python 2.5 and all existing versions of Python 3
- """
-
- def __init__(self, six_module_name):
- self.name = six_module_name
- self.known_modules = {}
-
- def _add_module(self, mod, *fullnames):
- for fullname in fullnames:
- self.known_modules[self.name + "." + fullname] = mod
-
- def _get_module(self, fullname):
- return self.known_modules[self.name + "." + fullname]
-
- def find_module(self, fullname, path=None):
- if fullname in self.known_modules:
- return self
- return None
-
- def __get_module(self, fullname):
- try:
- return self.known_modules[fullname]
- except KeyError:
- raise ImportError("This loader does not know module " + fullname)
-
- def load_module(self, fullname):
- try:
- # in case of a reload
- return sys.modules[fullname]
- except KeyError:
- pass
- mod = self.__get_module(fullname)
- if isinstance(mod, MovedModule):
- mod = mod._resolve()
- else:
- mod.__loader__ = self
- sys.modules[fullname] = mod
- return mod
-
- def is_package(self, fullname):
- """
- Return true, if the named module is a package.
-
- We need this method to get correct spec objects with
- Python 3.4 (see PEP451)
- """
- return hasattr(self.__get_module(fullname), "__path__")
-
- def get_code(self, fullname):
- """Return None
-
- Required, if is_package is implemented"""
- self.__get_module(fullname) # eventually raises ImportError
- return None
- get_source = get_code # same as get_code
-
-_importer = _SixMetaPathImporter(__name__)
-
-
-class _MovedItems(_LazyModule):
-
- """Lazy loading of moved objects"""
- __path__ = [] # mark as package
-
-
-_moved_attributes = [
- MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
- MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
- MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
- MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
- MovedAttribute("intern", "__builtin__", "sys"),
- MovedAttribute("map", "itertools", "builtins", "imap", "map"),
- MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
- MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
- MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
- MovedAttribute("reduce", "__builtin__", "functools"),
- MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
- MovedAttribute("StringIO", "StringIO", "io"),
- MovedAttribute("UserDict", "UserDict", "collections"),
- MovedAttribute("UserList", "UserList", "collections"),
- MovedAttribute("UserString", "UserString", "collections"),
- MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
- MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
- MovedModule("builtins", "__builtin__"),
- MovedModule("configparser", "ConfigParser"),
- MovedModule("copyreg", "copy_reg"),
- MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
- MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
- MovedModule("http_cookies", "Cookie", "http.cookies"),
- MovedModule("html_entities", "htmlentitydefs", "html.entities"),
- MovedModule("html_parser", "HTMLParser", "html.parser"),
- MovedModule("http_client", "httplib", "http.client"),
- MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
- MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
- MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
- MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
- MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
- MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
- MovedModule("cPickle", "cPickle", "pickle"),
- MovedModule("queue", "Queue"),
- MovedModule("reprlib", "repr"),
- MovedModule("socketserver", "SocketServer"),
- MovedModule("_thread", "thread", "_thread"),
- MovedModule("tkinter", "Tkinter"),
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
- MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
- MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
- MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
- MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
- MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
- MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
- MovedModule("tkinter_colorchooser", "tkColorChooser",
- "tkinter.colorchooser"),
- MovedModule("tkinter_commondialog", "tkCommonDialog",
- "tkinter.commondialog"),
- MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_font", "tkFont", "tkinter.font"),
- MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
- MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
- "tkinter.simpledialog"),
- MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
- MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
- MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
- MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
- MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
- MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
-]
-# Add windows specific modules.
-if sys.platform == "win32":
- _moved_attributes += [
- MovedModule("winreg", "_winreg"),
- ]
-
-for attr in _moved_attributes:
- setattr(_MovedItems, attr.name, attr)
- if isinstance(attr, MovedModule):
- _importer._add_module(attr, "moves." + attr.name)
-del attr
-
-_MovedItems._moved_attributes = _moved_attributes
-
-moves = _MovedItems(__name__ + ".moves")
-_importer._add_module(moves, "moves")
-
-
-class Module_six_moves_urllib_parse(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_parse"""
-
-
-_urllib_parse_moved_attributes = [
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
- MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
- MovedAttribute("quote", "urllib", "urllib.parse"),
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
- MovedAttribute("unquote", "urllib", "urllib.parse"),
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
- MovedAttribute("splitquery", "urllib", "urllib.parse"),
- MovedAttribute("splittag", "urllib", "urllib.parse"),
- MovedAttribute("splituser", "urllib", "urllib.parse"),
- MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
- MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
- MovedAttribute("uses_params", "urlparse", "urllib.parse"),
- MovedAttribute("uses_query", "urlparse", "urllib.parse"),
- MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
-]
-for attr in _urllib_parse_moved_attributes:
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
- "moves.urllib_parse", "moves.urllib.parse")
-
-
-class Module_six_moves_urllib_error(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_error"""
-
-
-_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
-]
-for attr in _urllib_error_moved_attributes:
- setattr(Module_six_moves_urllib_error, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
- "moves.urllib_error", "moves.urllib.error")
-
-
-class Module_six_moves_urllib_request(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_request"""
-
-
-_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
- MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
- MovedAttribute("URLopener", "urllib", "urllib.request"),
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
- MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
-]
-for attr in _urllib_request_moved_attributes:
- setattr(Module_six_moves_urllib_request, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
- "moves.urllib_request", "moves.urllib.request")
-
-
-class Module_six_moves_urllib_response(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_response"""
-
-
-_urllib_response_moved_attributes = [
- MovedAttribute("addbase", "urllib", "urllib.response"),
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
- MovedAttribute("addinfo", "urllib", "urllib.response"),
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
-]
-for attr in _urllib_response_moved_attributes:
- setattr(Module_six_moves_urllib_response, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
- "moves.urllib_response", "moves.urllib.response")
-
-
-class Module_six_moves_urllib_robotparser(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
-
-
-_urllib_robotparser_moved_attributes = [
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
-]
-for attr in _urllib_robotparser_moved_attributes:
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
- "moves.urllib_robotparser", "moves.urllib.robotparser")
-
-
-class Module_six_moves_urllib(types.ModuleType):
-
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
- __path__ = [] # mark as package
- parse = _importer._get_module("moves.urllib_parse")
- error = _importer._get_module("moves.urllib_error")
- request = _importer._get_module("moves.urllib_request")
- response = _importer._get_module("moves.urllib_response")
- robotparser = _importer._get_module("moves.urllib_robotparser")
-
- def __dir__(self):
- return ['parse', 'error', 'request', 'response', 'robotparser']
-
-_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
- "moves.urllib")
-
-
-def add_move(move):
- """Add an item to six.moves."""
- setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
- """Remove item from six.moves."""
- try:
- delattr(_MovedItems, name)
- except AttributeError:
- try:
- del moves.__dict__[name]
- except KeyError:
- raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
- _meth_func = "__func__"
- _meth_self = "__self__"
-
- _func_closure = "__closure__"
- _func_code = "__code__"
- _func_defaults = "__defaults__"
- _func_globals = "__globals__"
-else:
- _meth_func = "im_func"
- _meth_self = "im_self"
-
- _func_closure = "func_closure"
- _func_code = "func_code"
- _func_defaults = "func_defaults"
- _func_globals = "func_globals"
-
-
-try:
- advance_iterator = next
-except NameError:
- def advance_iterator(it):
- return it.next()
-next = advance_iterator
-
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-if PY3:
- def get_unbound_function(unbound):
- return unbound
-
- create_bound_method = types.MethodType
-
- def create_unbound_method(func, cls):
- return func
-
- Iterator = object
-else:
- def get_unbound_function(unbound):
- return unbound.im_func
-
- def create_bound_method(func, obj):
- return types.MethodType(func, obj, obj.__class__)
-
- def create_unbound_method(func, cls):
- return types.MethodType(func, None, cls)
-
- class Iterator(object):
-
- def next(self):
- return type(self).__next__(self)
-
- callable = callable
-_add_doc(get_unbound_function,
- """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_closure = operator.attrgetter(_func_closure)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-get_function_globals = operator.attrgetter(_func_globals)
-
-
-if PY3:
- def iterkeys(d, **kw):
- return iter(d.keys(**kw))
-
- def itervalues(d, **kw):
- return iter(d.values(**kw))
-
- def iteritems(d, **kw):
- return iter(d.items(**kw))
-
- def iterlists(d, **kw):
- return iter(d.lists(**kw))
-
- viewkeys = operator.methodcaller("keys")
-
- viewvalues = operator.methodcaller("values")
-
- viewitems = operator.methodcaller("items")
-else:
- def iterkeys(d, **kw):
- return d.iterkeys(**kw)
-
- def itervalues(d, **kw):
- return d.itervalues(**kw)
-
- def iteritems(d, **kw):
- return d.iteritems(**kw)
-
- def iterlists(d, **kw):
- return d.iterlists(**kw)
-
- viewkeys = operator.methodcaller("viewkeys")
-
- viewvalues = operator.methodcaller("viewvalues")
-
- viewitems = operator.methodcaller("viewitems")
-
-_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
-_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
-_add_doc(iteritems,
- "Return an iterator over the (key, value) pairs of a dictionary.")
-_add_doc(iterlists,
- "Return an iterator over the (key, [values]) pairs of a dictionary.")
-
-
-if PY3:
- def b(s):
- return s.encode("latin-1")
-
- def u(s):
- return s
- unichr = chr
- import struct
- int2byte = struct.Struct(">B").pack
- del struct
- byte2int = operator.itemgetter(0)
- indexbytes = operator.getitem
- iterbytes = iter
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
- _assertCountEqual = "assertCountEqual"
- if sys.version_info[1] <= 1:
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
- else:
- _assertRaisesRegex = "assertRaisesRegex"
- _assertRegex = "assertRegex"
-else:
- def b(s):
- return s
- # Workaround for standalone backslash
-
- def u(s):
- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
- unichr = unichr
- int2byte = chr
-
- def byte2int(bs):
- return ord(bs[0])
-
- def indexbytes(buf, i):
- return ord(buf[i])
- iterbytes = functools.partial(itertools.imap, ord)
- import StringIO
- StringIO = BytesIO = StringIO.StringIO
- _assertCountEqual = "assertItemsEqual"
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-def assertCountEqual(self, *args, **kwargs):
- return getattr(self, _assertCountEqual)(*args, **kwargs)
-
-
-def assertRaisesRegex(self, *args, **kwargs):
- return getattr(self, _assertRaisesRegex)(*args, **kwargs)
-
-
-def assertRegex(self, *args, **kwargs):
- return getattr(self, _assertRegex)(*args, **kwargs)
-
-
-if PY3:
- exec_ = getattr(moves.builtins, "exec")
-
- def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- def exec_(_code_, _globs_=None, _locs_=None):
- """Execute code in a namespace."""
- if _globs_ is None:
- frame = sys._getframe(1)
- _globs_ = frame.f_globals
- if _locs_ is None:
- _locs_ = frame.f_locals
- del frame
- elif _locs_ is None:
- _locs_ = _globs_
- exec("""exec _code_ in _globs_, _locs_""")
-
- exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
-""")
-
-
-if sys.version_info[:2] == (3, 2):
- exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
-""")
-else:
- def raise_from(value, from_value):
- raise value
-
-
-print_ = getattr(moves.builtins, "print", None)
-if print_ is None:
- def print_(*args, **kwargs):
- """The new-style print function for Python 2.4 and 2.5."""
- fp = kwargs.pop("file", sys.stdout)
- if fp is None:
- return
-
- def write(data):
- if not isinstance(data, basestring):
- data = str(data)
- # If the file has an encoding, encode unicode with it.
- if (isinstance(fp, file) and
- isinstance(data, unicode) and
- fp.encoding is not None):
- errors = getattr(fp, "errors", None)
- if errors is None:
- errors = "strict"
- data = data.encode(fp.encoding, errors)
- fp.write(data)
- want_unicode = False
- sep = kwargs.pop("sep", None)
- if sep is not None:
- if isinstance(sep, unicode):
- want_unicode = True
- elif not isinstance(sep, str):
- raise TypeError("sep must be None or a string")
- end = kwargs.pop("end", None)
- if end is not None:
- if isinstance(end, unicode):
- want_unicode = True
- elif not isinstance(end, str):
- raise TypeError("end must be None or a string")
- if kwargs:
- raise TypeError("invalid keyword arguments to print()")
- if not want_unicode:
- for arg in args:
- if isinstance(arg, unicode):
- want_unicode = True
- break
- if want_unicode:
- newline = unicode("\n")
- space = unicode(" ")
- else:
- newline = "\n"
- space = " "
- if sep is None:
- sep = space
- if end is None:
- end = newline
- for i, arg in enumerate(args):
- if i:
- write(sep)
- write(arg)
- write(end)
-if sys.version_info[:2] < (3, 3):
- _print = print_
-
- def print_(*args, **kwargs):
- fp = kwargs.get("file", sys.stdout)
- flush = kwargs.pop("flush", False)
- _print(*args, **kwargs)
- if flush and fp is not None:
- fp.flush()
-
-_add_doc(reraise, """Reraise an exception.""")
-
-if sys.version_info[0:2] < (3, 4):
- def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
- updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
-else:
- wraps = functools.wraps
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a dummy
- # metaclass for one level of class instantiation that replaces itself with
- # the actual metaclass.
- class metaclass(meta):
-
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
-
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- slots = orig_vars.get('__slots__')
- if slots is not None:
- if isinstance(slots, str):
- slots = [slots]
- for slots_var in slots:
- orig_vars.pop(slots_var)
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
-
-
-def python_2_unicode_compatible(klass):
- """
- A decorator that defines __unicode__ and __str__ methods under Python 2.
- Under Python 3 it does nothing.
-
- To support Python 2 and 3 with a single code base, define a __str__ method
- returning text and apply this decorator to the class.
- """
- if PY2:
- if '__str__' not in klass.__dict__:
- raise ValueError("@python_2_unicode_compatible cannot be applied "
- "to %s because it doesn't define __str__()." %
- klass.__name__)
- klass.__unicode__ = klass.__str__
- klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
- return klass
-
-
-# Complete the moves implementation.
-# This code is at the end of this module to speed up module loading.
-# Turn this module into a package.
-__path__ = [] # required for PEP 302 and PEP 451
-__package__ = __name__ # see PEP 366 @ReservedAssignment
-if globals().get("__spec__") is not None:
- __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
-# Remove other six meta path importers, since they cause problems. This can
-# happen if six is removed from sys.modules and then reloaded. (Setuptools does
-# this for some reason.)
-if sys.meta_path:
- for i, importer in enumerate(sys.meta_path):
- # Here's some real nastiness: Another "instance" of the six module might
- # be floating around. Therefore, we can't use isinstance() to check for
- # the six meta path importer, since the other six instance will have
- # inserted an importer with different class.
- if (type(importer).__name__ == "_SixMetaPathImporter" and
- importer.name == __name__):
- del sys.meta_path[i]
- break
- del i, importer
-# Finally, add the importer to the meta path import hook.
-sys.meta_path.append(_importer)
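
For context, not part of the original patch: the file removed above is the vendored copy of the `six` Python 2/3 compatibility library. A minimal sketch of how its helpers are typically used, assuming the system-wide `six` package is available in place of the deleted vendored copy:

    import six

    counters = {'reads': 1, 'writes': 2}
    for name, value in six.iteritems(counters):   # dict.iteritems() on Py2, dict.items() on Py3
        print(name, value)

    class PluginMeta(type):
        pass

    class Plugin(six.with_metaclass(PluginMeta, object)):
        """Receives PluginMeta as its metaclass on both Python 2 and Python 3."""

    print(six.PY2, six.PY3, six.u('text'), six.b('bytes'))
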
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
deleted file mode 100644
index d6594eb26..000000000
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import sys
-
-try:
- # Our match_hostname function is the same as 3.5's, so we only want to
- # import the match_hostname function if it's at least that good.
- if sys.version_info < (3, 5):
- raise ImportError("Fallback to vendored code")
-
- from ssl import CertificateError, match_hostname
-except ImportError:
- try:
- # Backport of the function from a pypi module
- from backports.ssl_match_hostname import CertificateError, match_hostname
- except ImportError:
- # Our vendored copy
- from ._implementation import CertificateError, match_hostname
-
-# Not needed, but documenting what we provide.
-__all__ = ('CertificateError', 'match_hostname')
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
deleted file mode 100644
index 1fd42f38a..000000000
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-
-# Note: This file is under the PSF license as the code comes from the python
-# stdlib. http://docs.python.org/3/license.html
-
-import re
-import sys
-
-# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
-# system, use it to handle IPAddress ServerAltnames (this was added in
-# python-3.5) otherwise only do DNS matching. This allows
-# backports.ssl_match_hostname to continue to be used all the way back to
-# python-2.4.
-try:
- import ipaddress
-except ImportError:
- ipaddress = None
-
-__version__ = '3.5.0.1'
-
-
-class CertificateError(ValueError):
- pass
-
-
-def _dnsname_match(dn, hostname, max_wildcards=1):
- """Matching according to RFC 6125, section 6.4.3
-
- http://tools.ietf.org/html/rfc6125#section-6.4.3
- """
- pats = []
- if not dn:
- return False
-
- # Ported from python3-syntax:
- # leftmost, *remainder = dn.split(r'.')
- parts = dn.split(r'.')
- leftmost = parts[0]
- remainder = parts[1:]
-
- wildcards = leftmost.count('*')
- if wildcards > max_wildcards:
- # Issue #17980: avoid denials of service by refusing more
- # than one wildcard per fragment. A survey of established
- # policy among SSL implementations showed it to be a
- # reasonable choice.
- raise CertificateError(
- "too many wildcards in certificate DNS name: " + repr(dn))
-
- # speed up common case w/o wildcards
- if not wildcards:
- return dn.lower() == hostname.lower()
-
- # RFC 6125, section 6.4.3, subitem 1.
- # The client SHOULD NOT attempt to match a presented identifier in which
- # the wildcard character comprises a label other than the left-most label.
- if leftmost == '*':
- # When '*' is a fragment by itself, it matches a non-empty dotless
- # fragment.
- pats.append('[^.]+')
- elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
- # RFC 6125, section 6.4.3, subitem 3.
- # The client SHOULD NOT attempt to match a presented identifier
- # where the wildcard character is embedded within an A-label or
- # U-label of an internationalized domain name.
- pats.append(re.escape(leftmost))
- else:
- # Otherwise, '*' matches any dotless string, e.g. www*
- pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
- # add the remaining fragments, ignore any wildcards
- for frag in remainder:
- pats.append(re.escape(frag))
-
- pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
- return pat.match(hostname)
-
-
-def _to_unicode(obj):
- if isinstance(obj, str) and sys.version_info < (3,):
- obj = unicode(obj, encoding='ascii', errors='strict')
- return obj
-
-def _ipaddress_match(ipname, host_ip):
- """Exact matching of IP addresses.
-
- RFC 6125 explicitly doesn't define an algorithm for this
- (section 1.7.2 - "Out of Scope").
- """
- # OpenSSL may add a trailing newline to a subjectAltName's IP address
- # Divergence from upstream: ipaddress can't handle byte str
- ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
- return ip == host_ip
-
-
-def match_hostname(cert, hostname):
- """Verify that *cert* (in decoded format as returned by
- SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
- rules are followed, but IP addresses are not accepted for *hostname*.
-
- CertificateError is raised on failure. On success, the function
- returns nothing.
- """
- if not cert:
- raise ValueError("empty or no certificate, match_hostname needs a "
- "SSL socket or SSL context with either "
- "CERT_OPTIONAL or CERT_REQUIRED")
- try:
- # Divergence from upstream: ipaddress can't handle byte str
- host_ip = ipaddress.ip_address(_to_unicode(hostname))
- except ValueError:
- # Not an IP address (common case)
- host_ip = None
- except UnicodeError:
- # Divergence from upstream: Have to deal with ipaddress not taking
- # byte strings. addresses should be all ascii, so we consider it not
- # an ipaddress in this case
- host_ip = None
- except AttributeError:
- # Divergence from upstream: Make ipaddress library optional
- if ipaddress is None:
- host_ip = None
- else:
- raise
- dnsnames = []
- san = cert.get('subjectAltName', ())
- for key, value in san:
- if key == 'DNS':
- if host_ip is None and _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- elif key == 'IP Address':
- if host_ip is not None and _ipaddress_match(value, host_ip):
- return
- dnsnames.append(value)
- if not dnsnames:
- # The subject is only checked when there is no dNSName entry
- # in subjectAltName
- for sub in cert.get('subject', ()):
- for key, value in sub:
- # XXX according to RFC 2818, the most specific Common Name
- # must be used.
- if key == 'commonName':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if len(dnsnames) > 1:
- raise CertificateError("hostname %r "
- "doesn't match either of %s"
- % (hostname, ', '.join(map(repr, dnsnames))))
- elif len(dnsnames) == 1:
- raise CertificateError("hostname %r "
- "doesn't match %r"
- % (hostname, dnsnames[0]))
- else:
- raise CertificateError("no appropriate commonName or "
- "subjectAltName fields were found")
diff --git a/python.d/python_modules/urllib3/poolmanager.py b/python.d/python_modules/urllib3/poolmanager.py
deleted file mode 100644
index 4ae91744d..000000000
--- a/python.d/python_modules/urllib3/poolmanager.py
+++ /dev/null
@@ -1,440 +0,0 @@
-from __future__ import absolute_import
-import collections
-import functools
-import logging
-
-from ._collections import RecentlyUsedContainer
-from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
-from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
-from .packages.six.moves.urllib.parse import urljoin
-from .request import RequestMethods
-from .util.url import parse_url
-from .util.retry import Retry
-
-
-__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
-
-
-log = logging.getLogger(__name__)
-
-SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
- 'ssl_version', 'ca_cert_dir', 'ssl_context')
-
-# All known keyword arguments that could be provided to the pool manager, its
-# pools, or the underlying connections. This is used to construct a pool key.
-_key_fields = (
- 'key_scheme', # str
- 'key_host', # str
- 'key_port', # int
- 'key_timeout', # int or float or Timeout
- 'key_retries', # int or Retry
- 'key_strict', # bool
- 'key_block', # bool
- 'key_source_address', # str
- 'key_key_file', # str
- 'key_cert_file', # str
- 'key_cert_reqs', # str
- 'key_ca_certs', # str
- 'key_ssl_version', # str
- 'key_ca_cert_dir', # str
- 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
- 'key_maxsize', # int
- 'key_headers', # dict
- 'key__proxy', # parsed proxy url
- 'key__proxy_headers', # dict
- 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
- 'key__socks_options', # dict
- 'key_assert_hostname', # bool or string
- 'key_assert_fingerprint', # str
-)
-
-#: The namedtuple class used to construct keys for the connection pool.
-#: All custom key schemes should include the fields in this key at a minimum.
-PoolKey = collections.namedtuple('PoolKey', _key_fields)
-
-
-def _default_key_normalizer(key_class, request_context):
- """
- Create a pool key out of a request context dictionary.
-
- According to RFC 3986, both the scheme and host are case-insensitive.
- Therefore, this function normalizes both before constructing the pool
- key for an HTTPS request. If you wish to change this behaviour, provide
- alternate callables to ``key_fn_by_scheme``.
-
- :param key_class:
- The class to use when constructing the key. This should be a namedtuple
- with the ``scheme`` and ``host`` keys at a minimum.
- :type key_class: namedtuple
- :param request_context:
- A dictionary-like object that contains the context for a request.
- :type request_context: dict
-
- :return: A namedtuple that can be used as a connection pool key.
- :rtype: PoolKey
- """
- # Since we mutate the dictionary, make a copy first
- context = request_context.copy()
- context['scheme'] = context['scheme'].lower()
- context['host'] = context['host'].lower()
-
- # These are both dictionaries and need to be transformed into frozensets
- for key in ('headers', '_proxy_headers', '_socks_options'):
- if key in context and context[key] is not None:
- context[key] = frozenset(context[key].items())
-
- # The socket_options key may be a list and needs to be transformed into a
- # tuple.
- socket_opts = context.get('socket_options')
- if socket_opts is not None:
- context['socket_options'] = tuple(socket_opts)
-
- # Map the kwargs to the names in the namedtuple - this is necessary since
- # namedtuples can't have fields starting with '_'.
- for key in list(context.keys()):
- context['key_' + key] = context.pop(key)
-
- # Default to ``None`` for keys missing from the context
- for field in key_class._fields:
- if field not in context:
- context[field] = None
-
- return key_class(**context)
-
-
-#: A dictionary that maps a scheme to a callable that creates a pool key.
-#: This can be used to alter the way pool keys are constructed, if desired.
-#: Each PoolManager makes a copy of this dictionary so they can be configured
-#: globally here, or individually on the instance.
-key_fn_by_scheme = {
- 'http': functools.partial(_default_key_normalizer, PoolKey),
- 'https': functools.partial(_default_key_normalizer, PoolKey),
-}
-
-pool_classes_by_scheme = {
- 'http': HTTPConnectionPool,
- 'https': HTTPSConnectionPool,
-}
-
-
-class PoolManager(RequestMethods):
- """
- Allows for arbitrary requests while transparently keeping track of
- necessary connection pools for you.
-
- :param num_pools:
- Number of connection pools to cache before discarding the least
- recently used pool.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param \\**connection_pool_kw:
- Additional parameters are used to create fresh
- :class:`urllib3.connectionpool.ConnectionPool` instances.
-
- Example::
-
- >>> manager = PoolManager(num_pools=2)
- >>> r = manager.request('GET', 'http://google.com/')
- >>> r = manager.request('GET', 'http://google.com/mail')
- >>> r = manager.request('GET', 'http://yahoo.com/')
- >>> len(manager.pools)
- 2
-
- """
-
- proxy = None
-
- def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
- RequestMethods.__init__(self, headers)
- self.connection_pool_kw = connection_pool_kw
- self.pools = RecentlyUsedContainer(num_pools,
- dispose_func=lambda p: p.close())
-
- # Locally set the pool classes and keys so other PoolManagers can
- # override them.
- self.pool_classes_by_scheme = pool_classes_by_scheme
- self.key_fn_by_scheme = key_fn_by_scheme.copy()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.clear()
- # Return False to re-raise any potential exceptions
- return False
-
- def _new_pool(self, scheme, host, port, request_context=None):
- """
- Create a new :class:`ConnectionPool` based on host, port, scheme, and
- any additional pool keyword arguments.
-
- If ``request_context`` is provided, it is passed as keyword arguments
- to the pool class used. This method is used to actually create the
- connection pools handed out by :meth:`connection_from_url` and
- companion methods. It is intended to be overridden for customization.
- """
- pool_cls = self.pool_classes_by_scheme[scheme]
- if request_context is None:
- request_context = self.connection_pool_kw.copy()
-
- # Although the context has everything necessary to create the pool,
- # this function has historically only used the scheme, host, and port
- # in the positional args. When an API change is acceptable these can
- # be removed.
- for key in ('scheme', 'host', 'port'):
- request_context.pop(key, None)
-
- if scheme == 'http':
- for kw in SSL_KEYWORDS:
- request_context.pop(kw, None)
-
- return pool_cls(host, port, **request_context)
-
- def clear(self):
- """
- Empty our store of pools and direct them all to close.
-
- This will not affect in-flight connections, but they will not be
- re-used after completion.
- """
- self.pools.clear()
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- """
- Get a :class:`ConnectionPool` based on the host, port, and scheme.
-
- If ``port`` isn't given, it will be derived from the ``scheme`` using
- ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
- provided, it is merged with the instance's ``connection_pool_kw``
- variable and used to create the new connection pool, if one is
- needed.
- """
-
- if not host:
- raise LocationValueError("No host specified.")
-
- request_context = self._merge_pool_kwargs(pool_kwargs)
- request_context['scheme'] = scheme or 'http'
- if not port:
- port = port_by_scheme.get(request_context['scheme'].lower(), 80)
- request_context['port'] = port
- request_context['host'] = host
-
- return self.connection_from_context(request_context)
-
- def connection_from_context(self, request_context):
- """
- Get a :class:`ConnectionPool` based on the request context.
-
- ``request_context`` must at least contain the ``scheme`` key and its
- value must be a key in ``key_fn_by_scheme`` instance variable.
- """
- scheme = request_context['scheme'].lower()
- pool_key_constructor = self.key_fn_by_scheme[scheme]
- pool_key = pool_key_constructor(request_context)
-
- return self.connection_from_pool_key(pool_key, request_context=request_context)
-
- def connection_from_pool_key(self, pool_key, request_context=None):
- """
- Get a :class:`ConnectionPool` based on the provided pool key.
-
- ``pool_key`` should be a namedtuple that only contains immutable
- objects. At a minimum it must have the ``scheme``, ``host``, and
- ``port`` fields.
- """
- with self.pools.lock:
- # If the scheme, host, or port doesn't match existing open
- # connections, open a new ConnectionPool.
- pool = self.pools.get(pool_key)
- if pool:
- return pool
-
- # Make a fresh ConnectionPool of the desired type
- scheme = request_context['scheme']
- host = request_context['host']
- port = request_context['port']
- pool = self._new_pool(scheme, host, port, request_context=request_context)
- self.pools[pool_key] = pool
-
- return pool
-
- def connection_from_url(self, url, pool_kwargs=None):
- """
- Similar to :func:`urllib3.connectionpool.connection_from_url`.
-
- If ``pool_kwargs`` is not provided and a new pool needs to be
- constructed, ``self.connection_pool_kw`` is used to initialize
- the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
- is provided, it is used instead. Note that if a new pool does not
- need to be created for the request, the provided ``pool_kwargs`` are
- not used.
- """
- u = parse_url(url)
- return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
- pool_kwargs=pool_kwargs)
-
- def _merge_pool_kwargs(self, override):
- """
- Merge a dictionary of override values for self.connection_pool_kw.
-
- This does not modify self.connection_pool_kw and returns a new dict.
- Any keys in the override dictionary with a value of ``None`` are
- removed from the merged dictionary.
- """
- base_pool_kwargs = self.connection_pool_kw.copy()
- if override:
- for key, value in override.items():
- if value is None:
- try:
- del base_pool_kwargs[key]
- except KeyError:
- pass
- else:
- base_pool_kwargs[key] = value
- return base_pool_kwargs
-
- def urlopen(self, method, url, redirect=True, **kw):
- """
- Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
- with custom cross-host redirect logic and only sends the request-uri
- portion of the ``url``.
-
- The given ``url`` parameter must be absolute, such that an appropriate
- :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
- """
- u = parse_url(url)
- conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
-
- kw['assert_same_host'] = False
- kw['redirect'] = False
- if 'headers' not in kw:
- kw['headers'] = self.headers
-
- if self.proxy is not None and u.scheme == "http":
- response = conn.urlopen(method, url, **kw)
- else:
- response = conn.urlopen(method, u.request_uri, **kw)
-
- redirect_location = redirect and response.get_redirect_location()
- if not redirect_location:
- return response
-
- # Support relative URLs for redirecting.
- redirect_location = urljoin(url, redirect_location)
-
- # RFC 7231, Section 6.4.4
- if response.status == 303:
- method = 'GET'
-
- retries = kw.get('retries')
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect)
-
- try:
- retries = retries.increment(method, url, response=response, _pool=conn)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise
- return response
-
- kw['retries'] = retries
- kw['redirect'] = redirect
-
- log.info("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(method, redirect_location, **kw)
-
-
-class ProxyManager(PoolManager):
- """
- Behaves just like :class:`PoolManager`, but sends all requests through
- the defined proxy, using the CONNECT method for HTTPS URLs.
-
- :param proxy_url:
- The URL of the proxy to be used.
-
- :param proxy_headers:
- A dictionary containing headers that will be sent to the proxy. In case
- of HTTP they are sent with each request, while in the
- HTTPS/CONNECT case they are sent only once. Could be used for proxy
- authentication.
-
- Example:
- >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
- >>> r1 = proxy.request('GET', 'http://google.com/')
- >>> r2 = proxy.request('GET', 'http://httpbin.org/')
- >>> len(proxy.pools)
- 1
- >>> r3 = proxy.request('GET', 'https://httpbin.org/')
- >>> r4 = proxy.request('GET', 'https://twitter.com/')
- >>> len(proxy.pools)
- 3
-
- """
-
- def __init__(self, proxy_url, num_pools=10, headers=None,
- proxy_headers=None, **connection_pool_kw):
-
- if isinstance(proxy_url, HTTPConnectionPool):
- proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
- proxy_url.port)
- proxy = parse_url(proxy_url)
- if not proxy.port:
- port = port_by_scheme.get(proxy.scheme, 80)
- proxy = proxy._replace(port=port)
-
- if proxy.scheme not in ("http", "https"):
- raise ProxySchemeUnknown(proxy.scheme)
-
- self.proxy = proxy
- self.proxy_headers = proxy_headers or {}
-
- connection_pool_kw['_proxy'] = self.proxy
- connection_pool_kw['_proxy_headers'] = self.proxy_headers
-
- super(ProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw)
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- if scheme == "https":
- return super(ProxyManager, self).connection_from_host(
- host, port, scheme, pool_kwargs=pool_kwargs)
-
- return super(ProxyManager, self).connection_from_host(
- self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
-
- def _set_proxy_headers(self, url, headers=None):
- """
- Sets headers needed by proxies: specifically, the Accept and Host
- headers. Only sets headers not provided by the user.
- """
- headers_ = {'Accept': '*/*'}
-
- netloc = parse_url(url).netloc
- if netloc:
- headers_['Host'] = netloc
-
- if headers:
- headers_.update(headers)
- return headers_
-
- def urlopen(self, method, url, redirect=True, **kw):
- "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
- u = parse_url(url)
-
- if u.scheme == "http":
- # For proxied HTTPS requests, httplib sets the necessary headers
- # on the CONNECT to the proxy. For HTTP, we'll definitely
- # need to set 'Host' at the very least.
- headers = kw.get('headers', self.headers)
- kw['headers'] = self._set_proxy_headers(url, headers)
-
- return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
-def proxy_from_url(url, **kw):
- return ProxyManager(proxy_url=url, **kw)
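
Illustrative only, not part of the original patch: a sketch of the pool-key behaviour implemented above. It assumes a regular urllib3 installation exposing the same PoolManager API as the deleted vendored copy.

    import urllib3

    manager = urllib3.PoolManager(num_pools=10, maxsize=4)

    pool_a = manager.connection_from_host('example.com', 80, scheme='http')
    pool_b = manager.connection_from_url('http://example.com/some/path')
    assert pool_a is pool_b        # same scheme/host/port -> same PoolKey -> same pool

    pool_c = manager.connection_from_host('example.com', 443, scheme='https')
    assert pool_a is not pool_c    # different scheme/port -> a separate HTTPSConnectionPool
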
diff --git a/python.d/python_modules/urllib3/request.py b/python.d/python_modules/urllib3/request.py
deleted file mode 100644
index c0fddff04..000000000
--- a/python.d/python_modules/urllib3/request.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from __future__ import absolute_import
-
-from .filepost import encode_multipart_formdata
-from .packages.six.moves.urllib.parse import urlencode
-
-
-__all__ = ['RequestMethods']
-
-
-class RequestMethods(object):
- """
- Convenience mixin for classes who implement a :meth:`urlopen` method, such
- as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
- :class:`~urllib3.poolmanager.PoolManager`.
-
- Provides behavior for making common types of HTTP request methods and
- decides which type of request field encoding to use.
-
- Specifically,
-
- :meth:`.request_encode_url` is for sending requests whose fields are
- encoded in the URL (such as GET, HEAD, DELETE).
-
- :meth:`.request_encode_body` is for sending requests whose fields are
- encoded in the *body* of the request using multipart or www-form-urlencoded
- (such as for POST, PUT, PATCH).
-
- :meth:`.request` is for making any kind of request; it will look up the
- appropriate encoding format and use one of the above two methods to make
- the request.
-
- Initializer parameters:
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
- """
-
- _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
-
- def __init__(self, headers=None):
- self.headers = headers or {}
-
- def urlopen(self, method, url, body=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **kw): # Abstract
- raise NotImplemented("Classes extending RequestMethods must implement "
- "their own ``urlopen`` method.")
-
- def request(self, method, url, fields=None, headers=None, **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the appropriate encoding of
- ``fields`` based on the ``method`` used.
-
- This is a convenience method that requires the least amount of manual
- effort. It can be used in most situations, while still having the
- option to drop down to more specific methods when necessary, such as
- :meth:`request_encode_url`, :meth:`request_encode_body`,
- or even the lowest level :meth:`urlopen`.
- """
- method = method.upper()
-
- if method in self._encode_url_methods:
- return self.request_encode_url(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
- else:
- return self.request_encode_body(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
-
- def request_encode_url(self, method, url, fields=None, headers=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the url. This is useful for request methods like GET, HEAD, DELETE, etc.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': headers}
- extra_kw.update(urlopen_kw)
-
- if fields:
- url += '?' + urlencode(fields)
-
- return self.urlopen(method, url, **extra_kw)
-
- def request_encode_body(self, method, url, fields=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the body. This is useful for request methods like POST, PUT, PATCH, etc.
-
- When ``encode_multipart=True`` (default), then
- :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
- the payload with the appropriate content type. Otherwise
- :meth:`urllib.urlencode` is used with the
- 'application/x-www-form-urlencoded' content type.
-
- Multipart encoding must be used when posting files, and it's reasonably
- safe to use it at other times too. However, it may break request
- signing, such as with OAuth.
-
- Supports an optional ``fields`` parameter of key/value strings AND
- key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
- the MIME type is optional. For example::
-
- fields = {
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(),
- 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
- }
-
- When uploading a file, providing a filename (the first parameter of the
- tuple) is optional but recommended to best mimic behavior of browsers.
-
- Note that if ``headers`` are supplied, the 'Content-Type' header will
- be overwritten because it depends on the dynamic random boundary string
- which is used to compose the body of the request. The random boundary
- string can be explicitly set with the ``multipart_boundary`` parameter.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': {}}
-
- if fields:
- if 'body' in urlopen_kw:
- raise TypeError(
- "request got values for both 'fields' and 'body', can only specify one.")
-
- if encode_multipart:
- body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
- else:
- body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
-
- extra_kw['body'] = body
- extra_kw['headers'] = {'Content-Type': content_type}
-
- extra_kw['headers'].update(headers)
- extra_kw.update(urlopen_kw)
-
- return self.urlopen(method, url, **extra_kw)
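
Illustrative only, not part of the original patch: how the request()/request_encode_url()/request_encode_body() split defined above behaves in practice, assuming a regular urllib3 installation. httpbin.org is used only as a placeholder endpoint.

    import urllib3

    http = urllib3.PoolManager()   # PoolManager mixes in RequestMethods

    # GET is in _encode_url_methods, so fields end up in the query string: /get?q=netdata
    r1 = http.request('GET', 'http://httpbin.org/get', fields={'q': 'netdata'})

    # POST is not, so fields are encoded into the body (multipart/form-data by default)
    r2 = http.request('POST', 'http://httpbin.org/post', fields={'q': 'netdata'})

    print(r1.status, r2.status)
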
diff --git a/python.d/python_modules/urllib3/response.py b/python.d/python_modules/urllib3/response.py
deleted file mode 100644
index 408d9996a..000000000
--- a/python.d/python_modules/urllib3/response.py
+++ /dev/null
@@ -1,622 +0,0 @@
-from __future__ import absolute_import
-from contextlib import contextmanager
-import zlib
-import io
-import logging
-from socket import timeout as SocketTimeout
-from socket import error as SocketError
-
-from ._collections import HTTPHeaderDict
-from .exceptions import (
- BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
- ResponseNotChunked, IncompleteRead, InvalidHeader
-)
-from .packages.six import string_types as basestring, binary_type, PY3
-from .packages.six.moves import http_client as httplib
-from .connection import HTTPException, BaseSSLError
-from .util.response import is_fp_closed, is_response_to_head
-
-log = logging.getLogger(__name__)
-
-
-class DeflateDecoder(object):
-
- def __init__(self):
- self._first_try = True
- self._data = binary_type()
- self._obj = zlib.decompressobj()
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
-
- if not self._first_try:
- return self._obj.decompress(data)
-
- self._data += data
- try:
- decompressed = self._obj.decompress(data)
- if decompressed:
- self._first_try = False
- self._data = None
- return decompressed
- except zlib.error:
- self._first_try = False
- self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
- try:
- return self.decompress(self._data)
- finally:
- self._data = None
-
-
-class GzipDecoder(object):
-
- def __init__(self):
- self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
- return self._obj.decompress(data)
-
-
-def _get_decoder(mode):
- if mode == 'gzip':
- return GzipDecoder()
-
- return DeflateDecoder()
-
-
-class HTTPResponse(io.IOBase):
- """
- HTTP Response container.
-
- Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
- loaded and decoded on-demand when the ``data`` property is accessed. This
- class is also compatible with the Python standard library's :mod:`io`
- module, and can hence be treated as a readable object in the context of that
- framework.
-
- Extra parameters for behaviour not present in httplib.HTTPResponse:
-
- :param preload_content:
- If True, the response's body will be preloaded during construction.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header (e.g. 'gzip' and 'deflate'); if False,
- the raw, undecoded data is returned.
-
- :param original_response:
- When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
- object, it's convenient to include the original for debug purposes. It's
- otherwise unused.
-
- :param retries:
- The retries contains the last :class:`~urllib3.util.retry.Retry` that
- was used during the request.
-
- :param enforce_content_length:
- Enforce content length checking. Body returned by server must match
- value of Content-Length header, if present. Otherwise, raise error.
- """
-
- CONTENT_DECODERS = ['gzip', 'deflate']
- REDIRECT_STATUSES = [301, 302, 303, 307, 308]
-
- def __init__(self, body='', headers=None, status=0, version=0, reason=None,
- strict=0, preload_content=True, decode_content=True,
- original_response=None, pool=None, connection=None,
- retries=None, enforce_content_length=False, request_method=None):
-
- if isinstance(headers, HTTPHeaderDict):
- self.headers = headers
- else:
- self.headers = HTTPHeaderDict(headers)
- self.status = status
- self.version = version
- self.reason = reason
- self.strict = strict
- self.decode_content = decode_content
- self.retries = retries
- self.enforce_content_length = enforce_content_length
-
- self._decoder = None
- self._body = None
- self._fp = None
- self._original_response = original_response
- self._fp_bytes_read = 0
-
- if body and isinstance(body, (basestring, binary_type)):
- self._body = body
-
- self._pool = pool
- self._connection = connection
-
- if hasattr(body, 'read'):
- self._fp = body
-
- # Are we using the chunked-style of transfer encoding?
- self.chunked = False
- self.chunk_left = None
- tr_enc = self.headers.get('transfer-encoding', '').lower()
- # Don't incur the penalty of creating a list and then discarding it
- encodings = (enc.strip() for enc in tr_enc.split(","))
- if "chunked" in encodings:
- self.chunked = True
-
- # Determine length of response
- self.length_remaining = self._init_length(request_method)
-
- # If requested, preload the body.
- if preload_content and not self._body:
- self._body = self.read(decode_content=decode_content)
-
- def get_redirect_location(self):
- """
- Should we redirect and where to?
-
- :returns: Truthy redirect location string if we got a redirect status
- code and valid location. ``None`` if redirect status and no
- location. ``False`` if not a redirect status code.
- """
- if self.status in self.REDIRECT_STATUSES:
- return self.headers.get('location')
-
- return False
-
- def release_conn(self):
- if not self._pool or not self._connection:
- return
-
- self._pool._put_conn(self._connection)
- self._connection = None
-
- @property
- def data(self):
- # For backwards-compat with urllib3 0.4 and earlier.
- if self._body:
- return self._body
-
- if self._fp:
- return self.read(cache_content=True)
-
- @property
- def connection(self):
- return self._connection
-
- def tell(self):
- """
- Obtain the number of bytes pulled over the wire so far. May differ from
- the amount of content returned by :meth:`HTTPResponse.read` if bytes
- are encoded on the wire (e.g., compressed).
- """
- return self._fp_bytes_read
-
- def _init_length(self, request_method):
- """
- Set initial length value for Response content if available.
- """
- length = self.headers.get('content-length')
-
- if length is not None and self.chunked:
- # This Response will fail with an IncompleteRead if it can't be
- # received as chunked. This method falls back to attempt reading
- # the response before raising an exception.
- log.warning("Received response with both Content-Length and "
- "Transfer-Encoding set. This is expressly forbidden "
- "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
- "attempting to process response as Transfer-Encoding: "
- "chunked.")
- return None
-
- elif length is not None:
- try:
- # RFC 7230 section 3.3.2 specifies multiple content lengths can
- # be sent in a single Content-Length header
- # (e.g. Content-Length: 42, 42). This line ensures the values
- # are all valid ints and that as long as the `set` length is 1,
- # all values are the same. Otherwise, the header is invalid.
- lengths = set([int(val) for val in length.split(',')])
- if len(lengths) > 1:
- raise InvalidHeader("Content-Length contained multiple "
- "unmatching values (%s)" % length)
- length = lengths.pop()
- except ValueError:
- length = None
- else:
- if length < 0:
- length = None
-
- # Convert status to int for comparison
- # In some cases, httplib returns a status of "_UNKNOWN"
- try:
- status = int(self.status)
- except ValueError:
- status = 0
-
- # Check for responses that shouldn't include a body
- if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
- length = 0
-
- return length
-
- def _init_decoder(self):
- """
- Set-up the _decoder attribute if necessary.
- """
- # Note: content-encoding value should be case-insensitive, per RFC 7230
- # Section 3.2
- content_encoding = self.headers.get('content-encoding', '').lower()
- if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
- self._decoder = _get_decoder(content_encoding)
-
- def _decode(self, data, decode_content, flush_decoder):
- """
- Decode the data passed in and potentially flush the decoder.
- """
- try:
- if decode_content and self._decoder:
- data = self._decoder.decompress(data)
- except (IOError, zlib.error) as e:
- content_encoding = self.headers.get('content-encoding', '').lower()
- raise DecodeError(
- "Received response with content-encoding: %s, but "
- "failed to decode it." % content_encoding, e)
-
- if flush_decoder and decode_content:
- data += self._flush_decoder()
-
- return data
-
- def _flush_decoder(self):
- """
- Flushes the decoder. Should only be called if the decoder is actually
- being used.
- """
- if self._decoder:
- buf = self._decoder.decompress(b'')
- return buf + self._decoder.flush()
-
- return b''
-
- @contextmanager
- def _error_catcher(self):
- """
- Catch low-level python exceptions, instead re-raising urllib3
- variants, so that low-level exceptions are not leaked in the
- high-level api.
-
- On exit, release the connection back to the pool.
- """
- clean_exit = False
-
- try:
- try:
- yield
-
- except SocketTimeout:
- # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
- # there is yet no clean way to get at it from this context.
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except BaseSSLError as e:
- # FIXME: Is there a better way to differentiate between SSLErrors?
- if 'read operation timed out' not in str(e): # Defensive:
- # This shouldn't happen but just in case we're missing an edge
- # case, let's avoid swallowing SSL errors.
- raise
-
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except (HTTPException, SocketError) as e:
- # This includes IncompleteRead.
- raise ProtocolError('Connection broken: %r' % e, e)
-
- # If no exception is thrown, we should avoid cleaning up
- # unnecessarily.
- clean_exit = True
- finally:
- # If we didn't terminate cleanly, we need to throw away our
- # connection.
- if not clean_exit:
- # The response may not be closed but we're not going to use it
- # anymore so close it now to ensure that the connection is
- # released back to the pool.
- if self._original_response:
- self._original_response.close()
-
- # Closing the response may not actually be sufficient to close
- # everything, so if we have a hold of the connection close that
- # too.
- if self._connection:
- self._connection.close()
-
- # If we hold the original response but it's closed now, we should
- # return the connection back to the pool.
- if self._original_response and self._original_response.isclosed():
- self.release_conn()
-
- def read(self, amt=None, decode_content=None, cache_content=False):
- """
- Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
- parameters: ``decode_content`` and ``cache_content``.
-
- :param amt:
- How much of the content to read. If specified, caching is skipped
- because it doesn't make sense to cache partial content as the full
- response.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
-
- :param cache_content:
- If True, will save the returned data such that the same result is
- returned regardless of the state of the underlying file object. This
- is useful if you want the ``.data`` property to continue working
- after having ``.read()`` the file object. (Overridden if ``amt`` is
- set.)
- """
- self._init_decoder()
- if decode_content is None:
- decode_content = self.decode_content
-
- if self._fp is None:
- return
-
- flush_decoder = False
- data = None
-
- with self._error_catcher():
- if amt is None:
- # cStringIO doesn't like amt=None
- data = self._fp.read()
- flush_decoder = True
- else:
- cache_content = False
- data = self._fp.read(amt)
- if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
- # Close the connection when no data is returned
- #
- # This is redundant to what httplib/http.client _should_
- # already do. However, versions of python released before
- # December 15, 2012 (http://bugs.python.org/issue16298) do
- # not properly close the connection in all cases. There is
- # no harm in redundantly calling close.
- self._fp.close()
- flush_decoder = True
- if self.enforce_content_length and self.length_remaining not in (0, None):
- # This is an edge case that httplib failed to cover due
- # to concerns of backward compatibility. We're
- # addressing it here to make sure IncompleteRead is
- # raised during streaming, so all calls with incorrect
- # Content-Length are caught.
- raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
-
- if data:
- self._fp_bytes_read += len(data)
- if self.length_remaining is not None:
- self.length_remaining -= len(data)
-
- data = self._decode(data, decode_content, flush_decoder)
-
- if cache_content:
- self._body = data
-
- return data
-
- def stream(self, amt=2**16, decode_content=None):
- """
- A generator wrapper for the read() method. A call will block until
- ``amt`` bytes have been read from the connection or until the
- connection is closed.
-
- :param amt:
- How much of the content to read. The generator will return up to
- this much data per iteration, but may return less. This is particularly
- likely when using compressed data. However, the empty string will
- never be returned.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- if self.chunked and self.supports_chunked_reads():
- for line in self.read_chunked(amt, decode_content=decode_content):
- yield line
- else:
- while not is_fp_closed(self._fp):
- data = self.read(amt=amt, decode_content=decode_content)
-
- if data:
- yield data
-
- @classmethod
- def from_httplib(ResponseCls, r, **response_kw):
- """
- Given an :class:`httplib.HTTPResponse` instance ``r``, return a
- corresponding :class:`urllib3.response.HTTPResponse` object.
-
- Remaining parameters are passed to the HTTPResponse constructor, along
- with ``original_response=r``.
- """
- headers = r.msg
-
- if not isinstance(headers, HTTPHeaderDict):
- if PY3: # Python 3
- headers = HTTPHeaderDict(headers.items())
- else: # Python 2
- headers = HTTPHeaderDict.from_httplib(headers)
-
- # HTTPResponse objects in Python 3 don't have a .strict attribute
- strict = getattr(r, 'strict', 0)
- resp = ResponseCls(body=r,
- headers=headers,
- status=r.status,
- version=r.version,
- reason=r.reason,
- strict=strict,
- original_response=r,
- **response_kw)
- return resp
-
- # Backwards-compatibility methods for httplib.HTTPResponse
- def getheaders(self):
- return self.headers
-
- def getheader(self, name, default=None):
- return self.headers.get(name, default)
-
- # Overrides from io.IOBase
- def close(self):
- if not self.closed:
- self._fp.close()
-
- if self._connection:
- self._connection.close()
-
- @property
- def closed(self):
- if self._fp is None:
- return True
- elif hasattr(self._fp, 'isclosed'):
- return self._fp.isclosed()
- elif hasattr(self._fp, 'closed'):
- return self._fp.closed
- else:
- return True
-
- def fileno(self):
- if self._fp is None:
- raise IOError("HTTPResponse has no file to get a fileno from")
- elif hasattr(self._fp, "fileno"):
- return self._fp.fileno()
- else:
- raise IOError("The file-like object this HTTPResponse is wrapped "
- "around has no file descriptor")
-
- def flush(self):
- if self._fp is not None and hasattr(self._fp, 'flush'):
- return self._fp.flush()
-
- def readable(self):
- # This method is required for `io` module compatibility.
- return True
-
- def readinto(self, b):
- # This method is required for `io` module compatibility.
- temp = self.read(len(b))
- if len(temp) == 0:
- return 0
- else:
- b[:len(temp)] = temp
- return len(temp)
-
- def supports_chunked_reads(self):
- """
- Checks if the underlying file-like object looks like a
- httplib.HTTPResponse object. We do this by testing for the fp
- attribute. If it is present we assume it returns raw chunks as
- processed by read_chunked().
- """
- return hasattr(self._fp, 'fp')
-
- def _update_chunk_length(self):
- # First, we'll figure out length of a chunk and then
- # we'll try to read it from socket.
- if self.chunk_left is not None:
- return
- line = self._fp.fp.readline()
- line = line.split(b';', 1)[0]
- try:
- self.chunk_left = int(line, 16)
- except ValueError:
- # Invalid chunked protocol response, abort.
- self.close()
- raise httplib.IncompleteRead(line)
-
- def _handle_chunk(self, amt):
- returned_chunk = None
- if amt is None:
- chunk = self._fp._safe_read(self.chunk_left)
- returned_chunk = chunk
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- elif amt < self.chunk_left:
- value = self._fp._safe_read(amt)
- self.chunk_left = self.chunk_left - amt
- returned_chunk = value
- elif amt == self.chunk_left:
- value = self._fp._safe_read(amt)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- returned_chunk = value
- else: # amt > self.chunk_left
- returned_chunk = self._fp._safe_read(self.chunk_left)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- return returned_chunk
-
- def read_chunked(self, amt=None, decode_content=None):
- """
- Similar to :meth:`HTTPResponse.read`, but with an additional
- parameter: ``decode_content``.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- self._init_decoder()
- # FIXME: Rewrite this method and make it a class with a better structured logic.
- if not self.chunked:
- raise ResponseNotChunked(
- "Response is not chunked. "
- "Header 'transfer-encoding: chunked' is missing.")
- if not self.supports_chunked_reads():
- raise BodyNotHttplibCompatible(
- "Body should be httplib.HTTPResponse like. "
- "It should have have an fp attribute which returns raw chunks.")
-
- # Don't bother reading the body of a HEAD request.
- if self._original_response and is_response_to_head(self._original_response):
- self._original_response.close()
- return
-
- with self._error_catcher():
- while True:
- self._update_chunk_length()
- if self.chunk_left == 0:
- break
- chunk = self._handle_chunk(amt)
- decoded = self._decode(chunk, decode_content=decode_content,
- flush_decoder=False)
- if decoded:
- yield decoded
-
- if decode_content:
- # On CPython and PyPy, we should never need to flush the
- # decoder. However, on Jython we *might* need to, so
- # let's defensively do it anyway.
- decoded = self._flush_decoder()
- if decoded: # Platform-specific: Jython.
- yield decoded
-
- # Chunk content ends with \r\n: discard it.
- while True:
- line = self._fp.fp.readline()
- if not line:
- # Some sites may not end with '\r\n'.
- break
- if line == b'\r\n':
- break
-
- # We read everything; close the "file".
- if self._original_response:
- self._original_response.close()
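
Illustrative only, not part of the original patch: the usual way the HTTPResponse API removed above is consumed, streaming the body instead of preloading it. Assumes a regular urllib3 installation; the URL is a placeholder.

    import urllib3

    http = urllib3.PoolManager()
    resp = http.request('GET', 'http://httpbin.org/bytes/65536', preload_content=False)
    try:
        for chunk in resp.stream(16384):   # up to 16 KiB per iteration, decoded per Content-Encoding
            handled = len(chunk)           # placeholder for real processing
    finally:
        resp.release_conn()                # return the connection to the pool
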
diff --git a/python.d/python_modules/urllib3/util/__init__.py b/python.d/python_modules/urllib3/util/__init__.py
deleted file mode 100644
index 2f2770b62..000000000
--- a/python.d/python_modules/urllib3/util/__init__.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import absolute_import
-# For backwards compatibility, provide imports that used to be here.
-from .connection import is_connection_dropped
-from .request import make_headers
-from .response import is_fp_closed
-from .ssl_ import (
- SSLContext,
- HAS_SNI,
- IS_PYOPENSSL,
- IS_SECURETRANSPORT,
- assert_fingerprint,
- resolve_cert_reqs,
- resolve_ssl_version,
- ssl_wrap_socket,
-)
-from .timeout import (
- current_time,
- Timeout,
-)
-
-from .retry import Retry
-from .url import (
- get_host,
- parse_url,
- split_first,
- Url,
-)
-from .wait import (
- wait_for_read,
- wait_for_write
-)
-
-__all__ = (
- 'HAS_SNI',
- 'IS_PYOPENSSL',
- 'IS_SECURETRANSPORT',
- 'SSLContext',
- 'Retry',
- 'Timeout',
- 'Url',
- 'assert_fingerprint',
- 'current_time',
- 'is_connection_dropped',
- 'is_fp_closed',
- 'get_host',
- 'parse_url',
- 'make_headers',
- 'resolve_cert_reqs',
- 'resolve_ssl_version',
- 'split_first',
- 'ssl_wrap_socket',
- 'wait_for_read',
- 'wait_for_write'
-)
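
Illustrative only, not part of the original patch: the names re-exported by the deleted util/__init__.py are urllib3's general-purpose helpers; parse_url() is a representative one. Assumes a regular urllib3 installation.

    from urllib3.util import Url, parse_url

    u = parse_url('https://user@example.com:8443/path?x=1#frag')
    print(u.scheme, u.host, u.port)   # https example.com 8443
    print(u.request_uri)              # /path?x=1
    print(isinstance(u, Url))         # True
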
diff --git a/python.d/python_modules/urllib3/util/connection.py b/python.d/python_modules/urllib3/util/connection.py
deleted file mode 100644
index bf699cfd0..000000000
--- a/python.d/python_modules/urllib3/util/connection.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from __future__ import absolute_import
-import socket
-from .wait import wait_for_read
-from .selectors import HAS_SELECT, SelectorError
-
-
-def is_connection_dropped(conn): # Platform-specific
- """
- Returns True if the connection is dropped and should be closed.
-
- :param conn:
- :class:`httplib.HTTPConnection` object.
-
- Note: For platforms like AppEngine, this will always return ``False`` to
- let the platform handle connection recycling transparently for us.
- """
- sock = getattr(conn, 'sock', False)
- if sock is False: # Platform-specific: AppEngine
- return False
- if sock is None: # Connection already closed (such as by httplib).
- return True
-
- if not HAS_SELECT:
- return False
-
- try:
- return bool(wait_for_read(sock, timeout=0.0))
- except SelectorError:
- return True
-
-
-# This function is copied from socket.py in the Python 2.7 standard
-# library test suite. Added to its signature is only `socket_options`.
-# One additional modification is that we avoid binding to IPv6 servers
-# discovered in DNS if the system doesn't have IPv6 functionality.
-def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- source_address=None, socket_options=None):
- """Connect to *address* and return the socket object.
-
- Convenience function. Connect to *address* (a 2-tuple ``(host,
- port)``) and return the socket object. Passing the optional
- *timeout* parameter will set the timeout on the socket instance
- before attempting to connect. If no *timeout* is supplied, the
- global default timeout setting returned by :func:`getdefaulttimeout`
- is used. If *source_address* is set it must be a tuple of (host, port)
- for the socket to bind as a source address before making the connection.
- A host of '' or port 0 tells the OS to use the default.
- """
-
- host, port = address
- if host.startswith('['):
- host = host.strip('[]')
- err = None
-
- # Using the value from allowed_gai_family() in the context of getaddrinfo lets
- # us select whether to work with IPv4 DNS records, IPv6 records, or both.
- # The original create_connection function always returns all records.
- family = allowed_gai_family()
-
- for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket.socket(af, socktype, proto)
-
- # If provided, set socket level options before connecting.
- _set_socket_options(sock, socket_options)
-
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- if source_address:
- sock.bind(source_address)
- sock.connect(sa)
- return sock
-
- except socket.error as e:
- err = e
- if sock is not None:
- sock.close()
- sock = None
-
- if err is not None:
- raise err
-
- raise socket.error("getaddrinfo returns an empty list")
-
-
-def _set_socket_options(sock, options):
- if options is None:
- return
-
- for opt in options:
- sock.setsockopt(*opt)
-
-
-def allowed_gai_family():
- """This function is designed to work in the context of
- getaddrinfo, where family=socket.AF_UNSPEC is the default and
- will perform a DNS search for both IPv6 and IPv4 records."""
-
- family = socket.AF_INET
- if HAS_IPV6:
- family = socket.AF_UNSPEC
- return family
-
-
-def _has_ipv6(host):
- """ Returns True if the system can bind an IPv6 address. """
- sock = None
- has_ipv6 = False
-
- if socket.has_ipv6:
- # has_ipv6 returns true if cPython was compiled with IPv6 support.
- # It does not tell us if the system has IPv6 support enabled. To
- # determine that we must bind to an IPv6 address.
- # https://github.com/shazow/urllib3/pull/611
- # https://bugs.python.org/issue658327
- try:
- sock = socket.socket(socket.AF_INET6)
- sock.bind((host, 0))
- has_ipv6 = True
- except Exception:
- pass
-
- if sock:
- sock.close()
- return has_ipv6
-
-
-HAS_IPV6 = _has_ipv6('::1')
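
Illustrative only, not part of the original patch: the socket_options argument consumed by the create_connection()/_set_socket_options() pair above is a list of setsockopt() tuples. The sketch assumes a regular urllib3 installation, which forwards the option list down to this helper.

    import socket
    import urllib3

    opts = [
        (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),   # disable Nagle's algorithm
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),   # enable TCP keep-alive probes
    ]

    pool = urllib3.HTTPConnectionPool('example.com', 80, socket_options=opts)
    resp = pool.request('GET', '/')
    print(resp.status)
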
diff --git a/python.d/python_modules/urllib3/util/request.py b/python.d/python_modules/urllib3/util/request.py
deleted file mode 100644
index 3ddfcd559..000000000
--- a/python.d/python_modules/urllib3/util/request.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from __future__ import absolute_import
-from base64 import b64encode
-
-from ..packages.six import b, integer_types
-from ..exceptions import UnrewindableBodyError
-
-ACCEPT_ENCODING = 'gzip,deflate'
-_FAILEDTELL = object()
-
-
-def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
- basic_auth=None, proxy_basic_auth=None, disable_cache=None):
- """
- Shortcuts for generating request headers.
-
- :param keep_alive:
- If ``True``, adds 'connection: keep-alive' header.
-
- :param accept_encoding:
- Can be a boolean, list, or string.
- ``True`` translates to 'gzip,deflate'.
- List will get joined by comma.
- String will be used as provided.
-
- :param user_agent:
- String representing the user-agent you want, such as
- "python-urllib3/0.6"
-
- :param basic_auth:
- Colon-separated username:password string for 'authorization: basic ...'
- auth header.
-
- :param proxy_basic_auth:
- Colon-separated username:password string for 'proxy-authorization: basic ...'
- auth header.
-
- :param disable_cache:
- If ``True``, adds 'cache-control: no-cache' header.
-
- Example::
-
- >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
- {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
- >>> make_headers(accept_encoding=True)
- {'accept-encoding': 'gzip,deflate'}
- """
- headers = {}
- if accept_encoding:
- if isinstance(accept_encoding, str):
- pass
- elif isinstance(accept_encoding, list):
- accept_encoding = ','.join(accept_encoding)
- else:
- accept_encoding = ACCEPT_ENCODING
- headers['accept-encoding'] = accept_encoding
-
- if user_agent:
- headers['user-agent'] = user_agent
-
- if keep_alive:
- headers['connection'] = 'keep-alive'
-
- if basic_auth:
- headers['authorization'] = 'Basic ' + \
- b64encode(b(basic_auth)).decode('utf-8')
-
- if proxy_basic_auth:
- headers['proxy-authorization'] = 'Basic ' + \
- b64encode(b(proxy_basic_auth)).decode('utf-8')
-
- if disable_cache:
- headers['cache-control'] = 'no-cache'
-
- return headers
-
-
-def set_file_position(body, pos):
- """
- If a position is provided, move file to that point.
- Otherwise, we'll attempt to record a position for future use.
- """
- if pos is not None:
- rewind_body(body, pos)
- elif getattr(body, 'tell', None) is not None:
- try:
- pos = body.tell()
- except (IOError, OSError):
- # This differentiates from None, allowing us to catch
- # a failed `tell()` later when trying to rewind the body.
- pos = _FAILEDTELL
-
- return pos
-
-
-def rewind_body(body, body_pos):
- """
- Attempt to rewind body to a certain position.
- Primarily used for request redirects and retries.
-
- :param body:
- File-like object that supports seek.
-
- :param int body_pos:
- Position to seek to in file.
- """
- body_seek = getattr(body, 'seek', None)
- if body_seek is not None and isinstance(body_pos, integer_types):
- try:
- body_seek(body_pos)
- except (IOError, OSError):
- raise UnrewindableBodyError("An error occurred when rewinding request "
- "body for redirect/retry.")
- elif body_pos is _FAILEDTELL:
- raise UnrewindableBodyError("Unable to record file position for rewinding "
- "request body during a redirect/retry.")
- else:
- raise ValueError("body_pos must be of type integer, "
- "instead it was %s." % type(body_pos))
diff --git a/python.d/python_modules/urllib3/util/response.py b/python.d/python_modules/urllib3/util/response.py
deleted file mode 100644
index 67cf730ab..000000000
--- a/python.d/python_modules/urllib3/util/response.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from __future__ import absolute_import
-from ..packages.six.moves import http_client as httplib
-
-from ..exceptions import HeaderParsingError
-
-
-def is_fp_closed(obj):
- """
- Checks whether a given file-like object is closed.
-
- :param obj:
- The file-like object to check.
- """
-
- try:
- # Check `isclosed()` first, in case Python3 doesn't set `closed`.
- # GH Issue #928
- return obj.isclosed()
- except AttributeError:
- pass
-
- try:
- # Check via the official file-like-object way.
- return obj.closed
- except AttributeError:
- pass
-
- try:
- # Check if the object is a container for another file-like object that
- # gets released on exhaustion (e.g. HTTPResponse).
- return obj.fp is None
- except AttributeError:
- pass
-
- raise ValueError("Unable to determine whether fp is closed.")
-
-
-def assert_header_parsing(headers):
- """
- Asserts whether all headers have been successfully parsed.
- Extracts encountered errors from the result of parsing headers.
-
- Only works on Python 3.
-
- :param headers: Headers to verify.
- :type headers: `httplib.HTTPMessage`.
-
- :raises urllib3.exceptions.HeaderParsingError:
- If parsing errors are found.
- """
-
- # This will fail silently if we pass in the wrong kind of parameter.
- # To make debugging easier add an explicit check.
- if not isinstance(headers, httplib.HTTPMessage):
- raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
- type(headers)))
-
- defects = getattr(headers, 'defects', None)
- get_payload = getattr(headers, 'get_payload', None)
-
- unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
-
- if defects or unparsed_data:
- raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
-
-
-def is_response_to_head(response):
- """
- Checks whether the request of a response has been a HEAD-request.
- Handles the quirks of AppEngine.
-
- :param response:
- :type response: :class:`httplib.HTTPResponse`
- """
- # FIXME: Can we do this somehow without accessing private httplib _method?
- method = response._method
- if isinstance(method, int): # Platform-specific: Appengine
- return method == 3
- return method.upper() == 'HEAD'
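A rough sketch of the removed response helpers; the host name is only an example, and the call pattern mirrors how urllib3's response handling uses them:

    from urllib3.packages.six.moves import http_client as httplib
    from urllib3.util.response import assert_header_parsing, is_fp_closed

    conn = httplib.HTTPConnection('example.com')
    conn.request('GET', '/')
    resp = conn.getresponse()
    assert_header_parsing(resp.msg)   # raises HeaderParsingError if the headers had defects
    resp.read()
    print(is_fp_closed(resp))         # True once the body has been exhausted
    conn.close()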
diff --git a/python.d/python_modules/urllib3/util/retry.py b/python.d/python_modules/urllib3/util/retry.py
deleted file mode 100644
index c603cb490..000000000
--- a/python.d/python_modules/urllib3/util/retry.py
+++ /dev/null
@@ -1,401 +0,0 @@
-from __future__ import absolute_import
-import time
-import logging
-from collections import namedtuple
-from itertools import takewhile
-import email
-import re
-
-from ..exceptions import (
- ConnectTimeoutError,
- MaxRetryError,
- ProtocolError,
- ReadTimeoutError,
- ResponseError,
- InvalidHeader,
-)
-from ..packages import six
-
-
-log = logging.getLogger(__name__)
-
-# Data structure for representing the metadata of requests that result in a retry.
-RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
- "status", "redirect_location"])
-
-
-class Retry(object):
- """ Retry configuration.
-
- Each retry attempt will create a new Retry object with updated values, so
- they can be safely reused.
-
- Retries can be defined as a default for a pool::
-
- retries = Retry(connect=5, read=2, redirect=5)
- http = PoolManager(retries=retries)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', retries=Retry(10))
-
- Retries can be disabled by passing ``False``::
-
- response = http.request('GET', 'http://example.com/', retries=False)
-
- Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
- retries are disabled, in which case the causing exception will be raised.
-
- :param int total:
- Total number of retries to allow. Takes precedence over other counts.
-
- Set to ``None`` to remove this constraint and fall back on other
- counts. It's a good idea to set this to some sensibly-high value to
- account for unexpected edge cases and avoid infinite retry loops.
-
- Set to ``0`` to fail on the first retry.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int connect:
- How many connection-related errors to retry on.
-
- These are errors raised before the request is sent to the remote server,
- which we assume has not triggered the server to process the request.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int read:
- How many times to retry on read errors.
-
- These errors are raised after the request was sent to the server, so the
- request may have side-effects.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int redirect:
- How many redirects to perform. Limit this to avoid infinite redirect
- loops.
-
- A redirect is an HTTP response with a status code 301, 302, 303, 307 or
- 308.
-
- Set to ``0`` to fail on the first retry of this type.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int status:
- How many times to retry on bad status codes.
-
- These are retries made on responses, where status code matches
- ``status_forcelist``.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param iterable method_whitelist:
- Set of uppercased HTTP method verbs that we should retry on.
-
- By default, we only retry on methods which are considered to be
- idempotent (multiple requests with the same parameters end with the
- same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
-
- Set to a ``False`` value to retry on any verb.
-
- :param iterable status_forcelist:
- A set of integer HTTP status codes that we should force a retry on.
- A retry is initiated if the request method is in ``method_whitelist``
- and the response status code is in ``status_forcelist``.
-
- By default, this is disabled with ``None``.
-
- :param float backoff_factor:
- A backoff factor to apply between attempts after the second try
- (most errors are resolved immediately by a second try without a
- delay). urllib3 will sleep for::
-
- {backoff factor} * (2 ^ ({number of total retries} - 1))
-
- seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
- for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
- than :attr:`Retry.BACKOFF_MAX`.
-
- By default, backoff is disabled (set to 0).
-
- :param bool raise_on_redirect: Whether, if the number of redirects is
- exhausted, to raise a MaxRetryError, or to return a response with a
- response code in the 3xx range.
-
- :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
- whether we should raise an exception, or return a response,
- if status falls in ``status_forcelist`` range and retries have
- been exhausted.
-
- :param tuple history: The history of the request encountered during
- each call to :meth:`~Retry.increment`. The list is in the order
- the requests occurred. Each list item is of class :class:`RequestHistory`.
-
- :param bool respect_retry_after_header:
- Whether to respect Retry-After header on status codes defined as
- :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
-
- """
-
- DEFAULT_METHOD_WHITELIST = frozenset([
- 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
-
- RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
-
- #: Maximum backoff time.
- BACKOFF_MAX = 120
-
- def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
- method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
- backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
- history=None, respect_retry_after_header=True):
-
- self.total = total
- self.connect = connect
- self.read = read
- self.status = status
-
- if redirect is False or total is False:
- redirect = 0
- raise_on_redirect = False
-
- self.redirect = redirect
- self.status_forcelist = status_forcelist or set()
- self.method_whitelist = method_whitelist
- self.backoff_factor = backoff_factor
- self.raise_on_redirect = raise_on_redirect
- self.raise_on_status = raise_on_status
- self.history = history or tuple()
- self.respect_retry_after_header = respect_retry_after_header
-
- def new(self, **kw):
- params = dict(
- total=self.total,
- connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
- method_whitelist=self.method_whitelist,
- status_forcelist=self.status_forcelist,
- backoff_factor=self.backoff_factor,
- raise_on_redirect=self.raise_on_redirect,
- raise_on_status=self.raise_on_status,
- history=self.history,
- )
- params.update(kw)
- return type(self)(**params)
-
- @classmethod
- def from_int(cls, retries, redirect=True, default=None):
- """ Backwards-compatibility for the old retries format."""
- if retries is None:
- retries = default if default is not None else cls.DEFAULT
-
- if isinstance(retries, Retry):
- return retries
-
- redirect = bool(redirect) and None
- new_retries = cls(retries, redirect=redirect)
- log.debug("Converted retries value: %r -> %r", retries, new_retries)
- return new_retries
-
- def get_backoff_time(self):
- """ Formula for computing the current backoff
-
- :rtype: float
- """
- # We want to consider only the last consecutive errors sequence (Ignore redirects).
- consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
- reversed(self.history))))
- if consecutive_errors_len <= 1:
- return 0
-
- backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
- return min(self.BACKOFF_MAX, backoff_value)
-
- def parse_retry_after(self, retry_after):
- # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
- if re.match(r"^\s*[0-9]+\s*$", retry_after):
- seconds = int(retry_after)
- else:
- retry_date_tuple = email.utils.parsedate(retry_after)
- if retry_date_tuple is None:
- raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
- retry_date = time.mktime(retry_date_tuple)
- seconds = retry_date - time.time()
-
- if seconds < 0:
- seconds = 0
-
- return seconds
-
- def get_retry_after(self, response):
- """ Get the value of Retry-After in seconds. """
-
- retry_after = response.getheader("Retry-After")
-
- if retry_after is None:
- return None
-
- return self.parse_retry_after(retry_after)
-
- def sleep_for_retry(self, response=None):
- retry_after = self.get_retry_after(response)
- if retry_after:
- time.sleep(retry_after)
- return True
-
- return False
-
- def _sleep_backoff(self):
- backoff = self.get_backoff_time()
- if backoff <= 0:
- return
- time.sleep(backoff)
-
- def sleep(self, response=None):
- """ Sleep between retry attempts.
-
- This method will respect a server's ``Retry-After`` response header
- and sleep the duration of the time requested. If that is not present, it
- will use an exponential backoff. By default, the backoff factor is 0 and
- this method will return immediately.
- """
-
- if response:
- slept = self.sleep_for_retry(response)
- if slept:
- return
-
- self._sleep_backoff()
-
- def _is_connection_error(self, err):
- """ Errors when we're fairly sure that the server did not receive the
- request, so it should be safe to retry.
- """
- return isinstance(err, ConnectTimeoutError)
-
- def _is_read_error(self, err):
- """ Errors that occur after the request has been started, so we should
- assume that the server began processing it.
- """
- return isinstance(err, (ReadTimeoutError, ProtocolError))
-
- def _is_method_retryable(self, method):
- """ Checks if a given HTTP method should be retried upon, depending if
- it is included on the method whitelist.
- """
- if self.method_whitelist and method.upper() not in self.method_whitelist:
- return False
-
- return True
-
- def is_retry(self, method, status_code, has_retry_after=False):
- """ Is this method/status code retryable? (Based on whitelists and control
- variables such as the number of total retries to allow, whether to
- respect the Retry-After header, whether this header is present, and
- whether the returned status code is on the list of status codes to
- be retried upon in the presence of the aforementioned header)
- """
- if not self._is_method_retryable(method):
- return False
-
- if self.status_forcelist and status_code in self.status_forcelist:
- return True
-
- return (self.total and self.respect_retry_after_header and
- has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
-
- def is_exhausted(self):
- """ Are we out of retries? """
- retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
- retry_counts = list(filter(None, retry_counts))
- if not retry_counts:
- return False
-
- return min(retry_counts) < 0
-
- def increment(self, method=None, url=None, response=None, error=None,
- _pool=None, _stacktrace=None):
- """ Return a new Retry object with incremented retry counters.
-
- :param response: A response object, or None, if the server did not
- return a response.
- :type response: :class:`~urllib3.response.HTTPResponse`
- :param Exception error: An error encountered during the request, or
- None if the response was received successfully.
-
- :return: A new ``Retry`` object.
- """
- if self.total is False and error:
- # Disabled, indicate to re-raise the error.
- raise six.reraise(type(error), error, _stacktrace)
-
- total = self.total
- if total is not None:
- total -= 1
-
- connect = self.connect
- read = self.read
- redirect = self.redirect
- status_count = self.status
- cause = 'unknown'
- status = None
- redirect_location = None
-
- if error and self._is_connection_error(error):
- # Connect retry?
- if connect is False:
- raise six.reraise(type(error), error, _stacktrace)
- elif connect is not None:
- connect -= 1
-
- elif error and self._is_read_error(error):
- # Read retry?
- if read is False or not self._is_method_retryable(method):
- raise six.reraise(type(error), error, _stacktrace)
- elif read is not None:
- read -= 1
-
- elif response and response.get_redirect_location():
- # Redirect retry?
- if redirect is not None:
- redirect -= 1
- cause = 'too many redirects'
- redirect_location = response.get_redirect_location()
- status = response.status
-
- else:
- # Incrementing because of a server error like a 500 in
- # status_forcelist and the given method is in the whitelist
- cause = ResponseError.GENERIC_ERROR
- if response and response.status:
- if status_count is not None:
- status_count -= 1
- cause = ResponseError.SPECIFIC_ERROR.format(
- status_code=response.status)
- status = response.status
-
- history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
-
- new_retry = self.new(
- total=total,
- connect=connect, read=read, redirect=redirect, status=status_count,
- history=history)
-
- if new_retry.is_exhausted():
- raise MaxRetryError(_pool, url, error or ResponseError(cause))
-
- log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
-
- return new_retry
-
- def __repr__(self):
- return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
- 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
- cls=type(self), self=self)
-
-
-# For backwards compatibility (equivalent to pre-v1.9):
-Retry.DEFAULT = Retry(3)
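For context, a minimal sketch of the removed Retry configuration wired into a pool manager; the URL and the chosen limits are illustrative:

    from urllib3 import PoolManager
    from urllib3.util.retry import Retry

    retries = Retry(
        total=5,
        connect=2,
        read=2,
        backoff_factor=0.3,                        # sleeps 0s, 0.6s, 1.2s, ... between attempts
        status_forcelist=frozenset([500, 502, 503]),
    )
    http = PoolManager(retries=retries)
    response = http.request('GET', 'http://example.com/')
    print(response.status)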
diff --git a/python.d/python_modules/urllib3/util/selectors.py b/python.d/python_modules/urllib3/util/selectors.py
deleted file mode 100644
index d75cb266b..000000000
--- a/python.d/python_modules/urllib3/util/selectors.py
+++ /dev/null
@@ -1,581 +0,0 @@
-# Backport of selectors.py from Python 3.5+ to support Python < 3.4
-# Also has the behavior specified in PEP 475 which is to retry syscalls
-# in the case of an EINTR error. This module is required because selectors34
-# does not follow this behavior and instead returns that no file descriptor
-# events have occurred rather than retry the syscall. The decision to drop
-# support for select.devpoll is made to maintain 100% test coverage.
-
-import errno
-import math
-import select
-import socket
-import sys
-import time
-from collections import namedtuple, Mapping
-
-try:
- monotonic = time.monotonic
-except (AttributeError, ImportError): # Python < 3.3
- monotonic = time.time
-
-EVENT_READ = (1 << 0)
-EVENT_WRITE = (1 << 1)
-
-HAS_SELECT = True # Variable that shows whether the platform has a selector.
-_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
-_DEFAULT_SELECTOR = None
-
-
-class SelectorError(Exception):
- def __init__(self, errcode):
- super(SelectorError, self).__init__()
- self.errno = errcode
-
- def __repr__(self):
- return "<SelectorError errno={0}>".format(self.errno)
-
- def __str__(self):
- return self.__repr__()
-
-
-def _fileobj_to_fd(fileobj):
- """ Return a file descriptor from a file object. If
- given an integer will simply return that integer back. """
- if isinstance(fileobj, int):
- fd = fileobj
- else:
- try:
- fd = int(fileobj.fileno())
- except (AttributeError, TypeError, ValueError):
- raise ValueError("Invalid file object: {0!r}".format(fileobj))
- if fd < 0:
- raise ValueError("Invalid file descriptor: {0}".format(fd))
- return fd
-
-
-# Determine which function to use to wrap system calls because Python 3.5+
-# already handles the case when system calls are interrupted.
-if sys.version_info >= (3, 5):
- def _syscall_wrapper(func, _, *args, **kwargs):
- """ This is the short-circuit version of the below logic
- because in Python 3.5+ all system calls automatically restart
- and recalculate their timeouts. """
- try:
- return func(*args, **kwargs)
- except (OSError, IOError, select.error) as e:
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- raise SelectorError(errcode)
-else:
- def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
- """ Wrapper function for syscalls that could fail due to EINTR.
- All functions should be retried if there is time left in the timeout
- in accordance with PEP 475. """
- timeout = kwargs.get("timeout", None)
- if timeout is None:
- expires = None
- recalc_timeout = False
- else:
- timeout = float(timeout)
- if timeout < 0.0: # Timeout less than 0 treated as no timeout.
- expires = None
- else:
- expires = monotonic() + timeout
-
- args = list(args)
- if recalc_timeout and "timeout" not in kwargs:
- raise ValueError(
- "Timeout must be in args or kwargs to be recalculated")
-
- result = _SYSCALL_SENTINEL
- while result is _SYSCALL_SENTINEL:
- try:
- result = func(*args, **kwargs)
- # OSError is thrown by select.select
- # IOError is thrown by select.epoll.poll
- # select.error is thrown by select.poll.poll
- # Aren't we thankful for Python 3.x rework for exceptions?
- except (OSError, IOError, select.error) as e:
- # select.error wasn't a subclass of OSError in the past.
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- elif hasattr(e, "args"):
- errcode = e.args[0]
-
- # Also test for the Windows equivalent of EINTR.
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
- errcode == errno.WSAEINTR))
-
- if is_interrupt:
- if expires is not None:
- current_time = monotonic()
- if current_time > expires:
- raise OSError(errno.ETIMEDOUT, "Connection timed out")
- if recalc_timeout:
- if "timeout" in kwargs:
- kwargs["timeout"] = expires - current_time
- continue
- if errcode:
- raise SelectorError(errcode)
- else:
- raise
- return result
-
-
-SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
-
-
-class _SelectorMapping(Mapping):
- """ Mapping of file objects to selector keys """
-
- def __init__(self, selector):
- self._selector = selector
-
- def __len__(self):
- return len(self._selector._fd_to_key)
-
- def __getitem__(self, fileobj):
- try:
- fd = self._selector._fileobj_lookup(fileobj)
- return self._selector._fd_to_key[fd]
- except KeyError:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- def __iter__(self):
- return iter(self._selector._fd_to_key)
-
-
-class BaseSelector(object):
- """ Abstract Selector class
-
- A selector supports registering file objects to be monitored
- for specific I/O events.
-
- A file object is a file descriptor or any object with a
- `fileno()` method. An arbitrary object can be attached to the
- file object which can be used for example to store context info,
- a callback, etc.
-
- A selector can use various implementations (select(), poll(), epoll(),
- and kqueue()) depending on the platform. The 'DefaultSelector' class uses
- the most efficient implementation for the current platform.
- """
- def __init__(self):
- # Maps file descriptors to keys.
- self._fd_to_key = {}
-
- # Read-only mapping returned by get_map()
- self._map = _SelectorMapping(self)
-
- def _fileobj_lookup(self, fileobj):
- """ Return a file descriptor from a file object.
- This wraps _fileobj_to_fd() to do an exhaustive
- search in case the object is invalid but we still
- have it in our map. Used by unregister() so we can
- unregister an object that was previously registered
- even if it is closed. It is also used by _SelectorMapping
- """
- try:
- return _fileobj_to_fd(fileobj)
- except ValueError:
-
- # Search through all our mapped keys.
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- return key.fd
-
- # Raise ValueError after all.
- raise
-
- def register(self, fileobj, events, data=None):
- """ Register a file object for a set of events to monitor. """
- if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {0!r}".format(events))
-
- key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
-
- if key.fd in self._fd_to_key:
- raise KeyError("{0!r} (FD {1}) is already registered"
- .format(fileobj, key.fd))
-
- self._fd_to_key[key.fd] = key
- return key
-
- def unregister(self, fileobj):
- """ Unregister a file object from being monitored. """
- try:
- key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- # Getting the fileno of a closed socket on Windows errors with EBADF.
- except socket.error as e: # Platform-specific: Windows.
- if e.errno != errno.EBADF:
- raise
- else:
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- self._fd_to_key.pop(key.fd)
- break
- else:
- raise KeyError("{0!r} is not registered".format(fileobj))
- return key
-
- def modify(self, fileobj, events, data=None):
- """ Change a registered file object monitored events and data. """
- # NOTE: Some subclasses optimize this operation even further.
- try:
- key = self._fd_to_key[self._fileobj_lookup(fileobj)]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- if events != key.events:
- self.unregister(fileobj)
- key = self.register(fileobj, events, data)
-
- elif data != key.data:
- # Use a shortcut to update the data.
- key = key._replace(data=data)
- self._fd_to_key[key.fd] = key
-
- return key
-
- def select(self, timeout=None):
- """ Perform the actual selection until some monitored file objects
- are ready or the timeout expires. """
- raise NotImplementedError()
-
- def close(self):
- """ Close the selector. This must be called to ensure that all
- underlying resources are freed. """
- self._fd_to_key.clear()
- self._map = None
-
- def get_key(self, fileobj):
- """ Return the key associated with a registered file object. """
- mapping = self.get_map()
- if mapping is None:
- raise RuntimeError("Selector is closed")
- try:
- return mapping[fileobj]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- def get_map(self):
- """ Return a mapping of file objects to selector keys """
- return self._map
-
- def _key_from_fd(self, fd):
- """ Return the key associated to a given file descriptor
- Return None if it is not found. """
- try:
- return self._fd_to_key[fd]
- except KeyError:
- return None
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- self.close()
-
-
-# Almost all platforms have select.select()
-if hasattr(select, "select"):
- class SelectSelector(BaseSelector):
- """ Select-based selector. """
- def __init__(self):
- super(SelectSelector, self).__init__()
- self._readers = set()
- self._writers = set()
-
- def register(self, fileobj, events, data=None):
- key = super(SelectSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- self._readers.add(key.fd)
- if events & EVENT_WRITE:
- self._writers.add(key.fd)
- return key
-
- def unregister(self, fileobj):
- key = super(SelectSelector, self).unregister(fileobj)
- self._readers.discard(key.fd)
- self._writers.discard(key.fd)
- return key
-
- def _select(self, r, w, timeout=None):
- """ Wrapper for select.select because timeout is a positional arg """
- return select.select(r, w, [], timeout)
-
- def select(self, timeout=None):
- # Selecting on empty lists on Windows errors out.
- if not len(self._readers) and not len(self._writers):
- return []
-
- timeout = None if timeout is None else max(timeout, 0.0)
- ready = []
- r, w, _ = _syscall_wrapper(self._select, True, self._readers,
- self._writers, timeout)
- r = set(r)
- w = set(w)
- for fd in r | w:
- events = 0
- if fd in r:
- events |= EVENT_READ
- if fd in w:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
-
-if hasattr(select, "poll"):
- class PollSelector(BaseSelector):
- """ Poll-based selector """
- def __init__(self):
- super(PollSelector, self).__init__()
- self._poll = select.poll()
-
- def register(self, fileobj, events, data=None):
- key = super(PollSelector, self).register(fileobj, events, data)
- event_mask = 0
- if events & EVENT_READ:
- event_mask |= select.POLLIN
- if events & EVENT_WRITE:
- event_mask |= select.POLLOUT
- self._poll.register(key.fd, event_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(PollSelector, self).unregister(fileobj)
- self._poll.unregister(key.fd)
- return key
-
- def _wrap_poll(self, timeout=None):
- """ Wrapper function for select.poll.poll() so that
- _syscall_wrapper can work with only seconds. """
- if timeout is not None:
- if timeout <= 0:
- timeout = 0
- else:
- # select.poll.poll() has a resolution of 1 millisecond,
- # round away from zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1e3)
-
- result = self._poll.poll(timeout)
- return result
-
- def select(self, timeout=None):
- ready = []
- fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.POLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.POLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
-
- return ready
-
-
-if hasattr(select, "epoll"):
- class EpollSelector(BaseSelector):
- """ Epoll-based selector """
- def __init__(self):
- super(EpollSelector, self).__init__()
- self._epoll = select.epoll()
-
- def fileno(self):
- return self._epoll.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(EpollSelector, self).register(fileobj, events, data)
- events_mask = 0
- if events & EVENT_READ:
- events_mask |= select.EPOLLIN
- if events & EVENT_WRITE:
- events_mask |= select.EPOLLOUT
- _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(EpollSelector, self).unregister(fileobj)
- try:
- _syscall_wrapper(self._epoll.unregister, False, key.fd)
- except SelectorError:
- # This can occur when the fd was closed since being registered.
- pass
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- if timeout <= 0:
- timeout = 0.0
- else:
- # select.epoll.poll() has a resolution of 1 millisecond
- # but luckily takes seconds so we don't need a wrapper
- # like PollSelector. Just for better rounding.
- timeout = math.ceil(timeout * 1e3) * 1e-3
- timeout = float(timeout)
- else:
- timeout = -1.0 # epoll.poll() must have a float.
-
- # We always want at least 1 to ensure that select can be called
- # with no file descriptors registered. Otherwise it will fail.
- max_events = max(len(self._fd_to_key), 1)
-
- ready = []
- fd_events = _syscall_wrapper(self._epoll.poll, True,
- timeout=timeout,
- maxevents=max_events)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.EPOLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.EPOLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
- def close(self):
- self._epoll.close()
- super(EpollSelector, self).close()
-
-
-if hasattr(select, "kqueue"):
- class KqueueSelector(BaseSelector):
- """ Kqueue / Kevent-based selector """
- def __init__(self):
- super(KqueueSelector, self).__init__()
- self._kqueue = select.kqueue()
-
- def fileno(self):
- return self._kqueue.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(KqueueSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- if events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- return key
-
- def unregister(self, fileobj):
- key = super(KqueueSelector, self).unregister(fileobj)
- if key.events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
- if key.events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
-
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- timeout = max(timeout, 0)
-
- max_events = len(self._fd_to_key) * 2
- ready_fds = {}
-
- kevent_list = _syscall_wrapper(self._kqueue.control, True,
- None, max_events, timeout)
-
- for kevent in kevent_list:
- fd = kevent.ident
- event_mask = kevent.filter
- events = 0
- if event_mask == select.KQ_FILTER_READ:
- events |= EVENT_READ
- if event_mask == select.KQ_FILTER_WRITE:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- if key.fd not in ready_fds:
- ready_fds[key.fd] = (key, events & key.events)
- else:
- old_events = ready_fds[key.fd][1]
- ready_fds[key.fd] = (key, (events | old_events) & key.events)
-
- return list(ready_fds.values())
-
- def close(self):
- self._kqueue.close()
- super(KqueueSelector, self).close()
-
-
-if not hasattr(select, 'select'): # Platform-specific: AppEngine
- HAS_SELECT = False
-
-
-def _can_allocate(struct):
- """ Checks that select structs can be allocated by the underlying
- operating system, not just advertised by the select module. We don't
- check select() because we'll be hopeful that most platforms that
- don't have it available will not advertise it. (ie: GAE) """
- try:
- # select.poll() objects won't fail until used.
- if struct == 'poll':
- p = select.poll()
- p.poll(0)
-
- # All others will fail on allocation.
- else:
- getattr(select, struct)().close()
- return True
- except (OSError, AttributeError) as e:
- return False
-
-
-# Choose the best implementation, roughly:
-# kqueue == epoll > poll > select. Devpoll not supported. (See above)
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
-def DefaultSelector():
- """ This function serves as a first call for DefaultSelector to
- detect if the select module is being monkey-patched incorrectly
- by eventlet or greenlet, and to preserve proper behavior. """
- global _DEFAULT_SELECTOR
- if _DEFAULT_SELECTOR is None:
- if _can_allocate('kqueue'):
- _DEFAULT_SELECTOR = KqueueSelector
- elif _can_allocate('epoll'):
- _DEFAULT_SELECTOR = EpollSelector
- elif _can_allocate('poll'):
- _DEFAULT_SELECTOR = PollSelector
- elif hasattr(select, 'select'):
- _DEFAULT_SELECTOR = SelectSelector
- else: # Platform-specific: AppEngine
- raise ValueError('Platform does not have a selector')
- return _DEFAULT_SELECTOR()
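A small sketch of the backported selector API being removed here; socket.socketpair() is assumed available (POSIX, or Python 3.5+ on Windows) and exists only to have something readable to wait on:

    import socket
    from urllib3.util.selectors import DefaultSelector, EVENT_READ

    rsock, wsock = socket.socketpair()
    with DefaultSelector() as selector:
        selector.register(rsock, EVENT_READ)
        wsock.send(b'ping')
        for key, events in selector.select(timeout=1.0):
            print(key.fileobj.recv(4))             # b'ping'
    rsock.close()
    wsock.close()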
diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/python.d/python_modules/urllib3/util/ssl_.py
deleted file mode 100644
index 33d428ed8..000000000
--- a/python.d/python_modules/urllib3/util/ssl_.py
+++ /dev/null
@@ -1,337 +0,0 @@
-from __future__ import absolute_import
-import errno
-import warnings
-import hmac
-
-from binascii import hexlify, unhexlify
-from hashlib import md5, sha1, sha256
-
-from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
-
-
-SSLContext = None
-HAS_SNI = False
-IS_PYOPENSSL = False
-IS_SECURETRANSPORT = False
-
-# Maps the length of a digest to a possible hash function producing this digest
-HASHFUNC_MAP = {
- 32: md5,
- 40: sha1,
- 64: sha256,
-}
-
-
-def _const_compare_digest_backport(a, b):
- """
- Compare two digests of equal length in constant time.
-
- The digests must be of type str/bytes.
- Returns True if the digests match, and False otherwise.
- """
- result = abs(len(a) - len(b))
- for l, r in zip(bytearray(a), bytearray(b)):
- result |= l ^ r
- return result == 0
-
-
-_const_compare_digest = getattr(hmac, 'compare_digest',
- _const_compare_digest_backport)
-
-
-try: # Test for SSL features
- import ssl
- from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
- from ssl import HAS_SNI # Has SNI?
-except ImportError:
- pass
-
-
-try:
- from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
-except ImportError:
- OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
- OP_NO_COMPRESSION = 0x20000
-
-# A secure default.
-# Sources for more information on TLS ciphers:
-#
-# - https://wiki.mozilla.org/Security/Server_Side_TLS
-# - https://www.ssllabs.com/projects/best-practices/index.html
-# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
-#
-# The general intent is:
-# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
-# - prefer ECDHE over DHE for better performance,
-# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
-# security,
-# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
-# - disable NULL authentication, MD5 MACs and DSS for security reasons.
-DEFAULT_CIPHERS = ':'.join([
- 'ECDH+AESGCM',
- 'ECDH+CHACHA20',
- 'DH+AESGCM',
- 'DH+CHACHA20',
- 'ECDH+AES256',
- 'DH+AES256',
- 'ECDH+AES128',
- 'DH+AES',
- 'RSA+AESGCM',
- 'RSA+AES',
- '!aNULL',
- '!eNULL',
- '!MD5',
-])
-
-try:
- from ssl import SSLContext # Modern SSL?
-except ImportError:
- import sys
-
- class SSLContext(object): # Platform-specific: Python 2 & 3.1
- supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
- (3, 2) <= sys.version_info)
-
- def __init__(self, protocol_version):
- self.protocol = protocol_version
- # Use default values from a real SSLContext
- self.check_hostname = False
- self.verify_mode = ssl.CERT_NONE
- self.ca_certs = None
- self.options = 0
- self.certfile = None
- self.keyfile = None
- self.ciphers = None
-
- def load_cert_chain(self, certfile, keyfile):
- self.certfile = certfile
- self.keyfile = keyfile
-
- def load_verify_locations(self, cafile=None, capath=None):
- self.ca_certs = cafile
-
- if capath is not None:
- raise SSLError("CA directories not supported in older Pythons")
-
- def set_ciphers(self, cipher_suite):
- if not self.supports_set_ciphers:
- raise TypeError(
- 'Your version of Python does not support setting '
- 'a custom cipher suite. Please upgrade to Python '
- '2.7, 3.2, or later if you need this functionality.'
- )
- self.ciphers = cipher_suite
-
- def wrap_socket(self, socket, server_hostname=None, server_side=False):
- warnings.warn(
- 'A true SSLContext object is not available. This prevents '
- 'urllib3 from configuring SSL appropriately and may cause '
- 'certain SSL connections to fail. You can upgrade to a newer '
- 'version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- InsecurePlatformWarning
- )
- kwargs = {
- 'keyfile': self.keyfile,
- 'certfile': self.certfile,
- 'ca_certs': self.ca_certs,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'server_side': server_side,
- }
- if self.supports_set_ciphers: # Platform-specific: Python 2.7+
- return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
- else: # Platform-specific: Python 2.6
- return wrap_socket(socket, **kwargs)
-
-
-def assert_fingerprint(cert, fingerprint):
- """
- Checks if given fingerprint matches the supplied certificate.
-
- :param cert:
- Certificate as bytes object.
- :param fingerprint:
- Fingerprint as string of hexdigits, can be interspersed by colons.
- """
-
- fingerprint = fingerprint.replace(':', '').lower()
- digest_length = len(fingerprint)
- hashfunc = HASHFUNC_MAP.get(digest_length)
- if not hashfunc:
- raise SSLError(
- 'Fingerprint of invalid length: {0}'.format(fingerprint))
-
- # We need encode() here for py32; works on py2 and py33.
- fingerprint_bytes = unhexlify(fingerprint.encode())
-
- cert_digest = hashfunc(cert).digest()
-
- if not _const_compare_digest(cert_digest, fingerprint_bytes):
- raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
- .format(fingerprint, hexlify(cert_digest)))
-
-
-def resolve_cert_reqs(candidate):
- """
- Resolves the argument to a numeric constant, which can be passed to
- the wrap_socket function/method from the ssl module.
- Defaults to :data:`ssl.CERT_NONE`.
- If given a string it is assumed to be the name of the constant in the
- :mod:`ssl` module or its abbreviation.
- (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
- If it's neither `None` nor a string we assume it is already the numeric
- constant which can directly be passed to wrap_socket.
- """
- if candidate is None:
- return CERT_NONE
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'CERT_' + candidate)
- return res
-
- return candidate
-
-
-def resolve_ssl_version(candidate):
- """
- Like resolve_cert_reqs, but resolves an SSL/TLS protocol version; defaults to :data:`ssl.PROTOCOL_SSLv23`.
- """
- if candidate is None:
- return PROTOCOL_SSLv23
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'PROTOCOL_' + candidate)
- return res
-
- return candidate
-
-
-def create_urllib3_context(ssl_version=None, cert_reqs=None,
- options=None, ciphers=None):
- """All arguments have the same meaning as ``ssl_wrap_socket``.
-
- By default, this function does a lot of the same work that
- ``ssl.create_default_context`` does on Python 3.4+. It:
-
- - Disables SSLv2, SSLv3, and compression
- - Sets a restricted set of server ciphers
-
- If you wish to enable SSLv3, you can do::
-
- from urllib3.util import ssl_
- context = ssl_.create_urllib3_context()
- context.options &= ~ssl_.OP_NO_SSLv3
-
- You can do the same to enable compression (substituting ``COMPRESSION``
- for ``SSLv3`` in the last line above).
-
- :param ssl_version:
- The desired protocol version to use. This will default to
- PROTOCOL_SSLv23 which will negotiate the highest protocol that both
- the server and your installation of OpenSSL support.
- :param cert_reqs:
- Whether to require the certificate verification. This defaults to
- ``ssl.CERT_REQUIRED``.
- :param options:
- Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
- ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
- :param ciphers:
- Which cipher suites to allow the server to select.
- :returns:
- Constructed SSLContext object with specified options
- :rtype: SSLContext
- """
- context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
-
- # Setting the default here, as we may have no ssl module on import
- cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
-
- if options is None:
- options = 0
- # SSLv2 is easily broken and is considered harmful and dangerous
- options |= OP_NO_SSLv2
- # SSLv3 has several problems and is now dangerous
- options |= OP_NO_SSLv3
- # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
- # (issue #309)
- options |= OP_NO_COMPRESSION
-
- context.options |= options
-
- if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
- context.set_ciphers(ciphers or DEFAULT_CIPHERS)
-
- context.verify_mode = cert_reqs
- if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- # We do our own verification, including fingerprints and alternative
- # hostnames. So disable it here
- context.check_hostname = False
- return context
-
-
-def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
- ca_certs=None, server_hostname=None,
- ssl_version=None, ciphers=None, ssl_context=None,
- ca_cert_dir=None):
- """
- All arguments except for server_hostname, ssl_context, and ca_cert_dir have
- the same meaning as they do when using :func:`ssl.wrap_socket`.
-
- :param server_hostname:
- When SNI is supported, the expected hostname of the certificate
- :param ssl_context:
- A pre-made :class:`SSLContext` object. If none is provided, one will
- be created using :func:`create_urllib3_context`.
- :param ciphers:
- A string of ciphers we wish the client to support. This is not
- supported on Python 2.6 as the ssl module does not support it.
- :param ca_cert_dir:
- A directory containing CA certificates in multiple separate files, as
- supported by OpenSSL's -CApath flag or the capath argument to
- SSLContext.load_verify_locations().
- """
- context = ssl_context
- if context is None:
- # Note: This branch of code and all the variables in it are no longer
- # used by urllib3 itself. We should consider deprecating and removing
- # this code.
- context = create_urllib3_context(ssl_version, cert_reqs,
- ciphers=ciphers)
-
- if ca_certs or ca_cert_dir:
- try:
- context.load_verify_locations(ca_certs, ca_cert_dir)
- except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
- raise SSLError(e)
- # Py33 raises FileNotFoundError which subclasses OSError
- # These are not equivalent unless we check the errno attribute
- except OSError as e: # Platform-specific: Python 3.3 and beyond
- if e.errno == errno.ENOENT:
- raise SSLError(e)
- raise
- elif getattr(context, 'load_default_certs', None) is not None:
- # try to load OS default certs; works well on Windows (requires Python 3.4+)
- context.load_default_certs()
-
- if certfile:
- context.load_cert_chain(certfile, keyfile)
- if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
- return context.wrap_socket(sock, server_hostname=server_hostname)
-
- warnings.warn(
- 'An HTTPS request has been made, but the SNI (Subject Name '
- 'Indication) extension to TLS is not available on this platform. '
- 'This may cause the server to present an incorrect TLS '
- 'certificate, which can cause validation failures. You can upgrade to '
- 'a newer version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- SNIMissingWarning
- )
- return context.wrap_socket(sock)
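A hedged sketch of wrapping a plain socket with the removed TLS helpers; the host name and CA bundle path are placeholders for whatever the caller actually uses:

    import socket
    from urllib3.util.ssl_ import create_urllib3_context, ssl_wrap_socket

    context = create_urllib3_context()             # disables SSLv2/SSLv3 and compression
    plain_sock = socket.create_connection(('example.com', 443))
    tls_sock = ssl_wrap_socket(plain_sock,
                               server_hostname='example.com',
                               ca_certs='/etc/ssl/certs/ca-certificates.crt',
                               ssl_context=context)
    print(tls_sock.version())
    tls_sock.close()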
diff --git a/python.d/python_modules/urllib3/util/timeout.py b/python.d/python_modules/urllib3/util/timeout.py
deleted file mode 100644
index cec817e6e..000000000
--- a/python.d/python_modules/urllib3/util/timeout.py
+++ /dev/null
@@ -1,242 +0,0 @@
-from __future__ import absolute_import
-# The default socket timeout, used by httplib to indicate that no timeout was
-# specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT
-import time
-
-from ..exceptions import TimeoutStateError
-
-# A sentinel value to indicate that no timeout was specified by the user in
-# urllib3
-_Default = object()
-
-
-# Use time.monotonic if available.
-current_time = getattr(time, "monotonic", time.time)
-
-
-class Timeout(object):
- """ Timeout configuration.
-
- Timeouts can be defined as a default for a pool::
-
- timeout = Timeout(connect=2.0, read=7.0)
- http = PoolManager(timeout=timeout)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
-
- Timeouts can be disabled by setting all the parameters to ``None``::
-
- no_timeout = Timeout(connect=None, read=None)
- response = http.request('GET', 'http://example.com/', timeout=no_timeout)
-
-
- :param total:
- This combines the connect and read timeouts into one; the read timeout
- will be set to the time leftover from the connect attempt. In the
- event that both a connect timeout and a total are specified, or a read
- timeout and a total are specified, the shorter timeout will be applied.
-
- Defaults to None.
-
- :type total: integer, float, or None
-
- :param connect:
- The maximum amount of time to wait for a connection attempt to a server
- to succeed. Omitting the parameter will default the connect timeout to
- the system default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout for connection attempts.
-
- :type connect: integer, float, or None
-
- :param read:
- The maximum amount of time to wait between consecutive
- read operations for a response from the server. Omitting
- the parameter will default the read timeout to the system
- default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout.
-
- :type read: integer, float, or None
-
- .. note::
-
- Many factors can affect the total amount of time for urllib3 to return
- an HTTP response.
-
- For example, Python's DNS resolver does not obey the timeout specified
- on the socket. Other factors that can affect total request time include
- high CPU load, high swap, the program running at a low priority level,
- or other behaviors.
-
- In addition, the read and total timeouts only measure the time between
- read operations on the socket connecting the client and the server,
- not the total amount of time for the request to return a complete
- response. For most requests, the timeout is raised because the server
- has not sent the first byte in the specified time. This is not always
- the case; if a server streams one byte every fifteen seconds, a timeout
- of 20 seconds will not trigger, even though the request will take
- several minutes to complete.
-
- If your goal is to cut off any request after a set amount of wall clock
- time, consider having a second "watcher" thread to cut off a slow
- request.
- """
-
- #: A sentinel object representing the default timeout value
- DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
-
- def __init__(self, total=None, connect=_Default, read=_Default):
- self._connect = self._validate_timeout(connect, 'connect')
- self._read = self._validate_timeout(read, 'read')
- self.total = self._validate_timeout(total, 'total')
- self._start_connect = None
-
- def __str__(self):
- return '%s(connect=%r, read=%r, total=%r)' % (
- type(self).__name__, self._connect, self._read, self.total)
-
- @classmethod
- def _validate_timeout(cls, value, name):
- """ Check that a timeout attribute is valid.
-
- :param value: The timeout value to validate
- :param name: The name of the timeout attribute to validate. This is
- used to specify in error messages.
- :return: The validated and casted version of the given value.
- :raises ValueError: If it is a numeric value less than or equal to
- zero, or the type is not an integer, float, or None.
- """
- if value is _Default:
- return cls.DEFAULT_TIMEOUT
-
- if value is None or value is cls.DEFAULT_TIMEOUT:
- return value
-
- if isinstance(value, bool):
- raise ValueError("Timeout cannot be a boolean value. It must "
- "be an int, float or None.")
- try:
- float(value)
- except (TypeError, ValueError):
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- try:
- if value <= 0:
- raise ValueError("Attempted to set %s timeout to %s, but the "
- "timeout cannot be set to a value less "
- "than or equal to 0." % (name, value))
- except TypeError: # Python 3
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- return value
-
- @classmethod
- def from_float(cls, timeout):
- """ Create a new Timeout from a legacy timeout value.
-
- The timeout value used by httplib.py sets the same timeout on the
- connect(), and recv() socket requests. This creates a :class:`Timeout`
- object that sets the individual timeouts to the ``timeout`` value
- passed to this function.
-
- :param timeout: The legacy timeout value.
- :type timeout: integer, float, sentinel default object, or None
- :return: Timeout object
- :rtype: :class:`Timeout`
- """
- return Timeout(read=timeout, connect=timeout)
-
- def clone(self):
- """ Create a copy of the timeout object
-
- Timeout properties are stored per-pool but each request needs a fresh
- Timeout object to ensure each one has its own start/stop configured.
-
- :return: a copy of the timeout object
- :rtype: :class:`Timeout`
- """
- # We can't use copy.deepcopy because that will also create a new object
- # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
- # detect the user default.
- return Timeout(connect=self._connect, read=self._read,
- total=self.total)
-
- def start_connect(self):
- """ Start the timeout clock, used during a connect() attempt
-
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to start a timer that has been started already.
- """
- if self._start_connect is not None:
- raise TimeoutStateError("Timeout timer has already been started.")
- self._start_connect = current_time()
- return self._start_connect
-
- def get_connect_duration(self):
- """ Gets the time elapsed since the call to :meth:`start_connect`.
-
- :return: Elapsed time.
- :rtype: float
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to get duration for a timer that hasn't been started.
- """
- if self._start_connect is None:
- raise TimeoutStateError("Can't get connect duration for timer "
- "that has not started.")
- return current_time() - self._start_connect
-
- @property
- def connect_timeout(self):
- """ Get the value to use when setting a connection timeout.
-
- This will be a positive float or integer, the value None
- (never timeout), or the default system timeout.
-
- :return: Connect timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- """
- if self.total is None:
- return self._connect
-
- if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
- return self.total
-
- return min(self._connect, self.total)
-
- @property
- def read_timeout(self):
- """ Get the value for the read timeout.
-
- This assumes some time has elapsed in the connection timeout and
- computes the read timeout appropriately.
-
- If self.total is set, the read timeout is dependent on the amount of
- time taken by the connect timeout. If the connection time has not been
- established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
- raised.
-
- :return: Value to use for the read timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
- has not yet been called on this object.
- """
- if (self.total is not None and
- self.total is not self.DEFAULT_TIMEOUT and
- self._read is not None and
- self._read is not self.DEFAULT_TIMEOUT):
- # In case the connect timeout has not yet been established.
- if self._start_connect is None:
- return self._read
- return max(0, min(self.total - self.get_connect_duration(),
- self._read))
- elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
- return max(0, self.total - self.get_connect_duration())
- else:
- return self._read
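A minimal sketch of the removed Timeout object, both attached to a pool and used stand-alone; the URL is only an example:

    from urllib3 import PoolManager
    from urllib3.util.timeout import Timeout

    timeout = Timeout(connect=2.0, read=7.0)       # separate connect/read limits
    http = PoolManager(timeout=timeout)
    response = http.request('GET', 'http://example.com/')
    print(response.status)

    # The object also tracks elapsed connect time itself:
    t = Timeout(total=10.0)
    t.start_connect()
    print(t.connect_timeout, t.read_timeout)       # 10.0, and whatever of the budget remains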
diff --git a/python.d/python_modules/urllib3/util/url.py b/python.d/python_modules/urllib3/util/url.py
deleted file mode 100644
index 6b6f9968d..000000000
--- a/python.d/python_modules/urllib3/util/url.py
+++ /dev/null
@@ -1,230 +0,0 @@
-from __future__ import absolute_import
-from collections import namedtuple
-
-from ..exceptions import LocationParseError
-
-
-url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
-
-# We only want to normalize urls with an HTTP(S) scheme.
-# urllib3 infers URLs without a scheme (None) to be http.
-NORMALIZABLE_SCHEMES = ('http', 'https', None)
-
-
-class Url(namedtuple('Url', url_attrs)):
- """
- Datastructure for representing an HTTP URL. Used as a return value for
- :func:`parse_url`. Both the scheme and host are normalized as they are
- both case-insensitive according to RFC 3986.
- """
- __slots__ = ()
-
- def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
- query=None, fragment=None):
- if path and not path.startswith('/'):
- path = '/' + path
- if scheme:
- scheme = scheme.lower()
- if host and scheme in NORMALIZABLE_SCHEMES:
- host = host.lower()
- return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
- query, fragment)
-
- @property
- def hostname(self):
- """For backwards-compatibility with urlparse. We're nice like that."""
- return self.host
-
- @property
- def request_uri(self):
- """Absolute path including the query string."""
- uri = self.path or '/'
-
- if self.query is not None:
- uri += '?' + self.query
-
- return uri
-
- @property
- def netloc(self):
- """Network location including host and port"""
- if self.port:
- return '%s:%d' % (self.host, self.port)
- return self.host
-
- @property
- def url(self):
- """
- Convert self into a url
-
- This function should more or less round-trip with :func:`.parse_url`. The
- returned url may not be exactly the same as the url inputted to
- :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
- with a blank port will have : removed).
-
- Example: ::
-
- >>> U = parse_url('http://google.com/mail/')
- >>> U.url
- 'http://google.com/mail/'
- >>> Url('http', 'username:password', 'host.com', 80,
- ... '/path', 'query', 'fragment').url
- 'http://username:password@host.com:80/path?query#fragment'
- """
- scheme, auth, host, port, path, query, fragment = self
- url = ''
-
-        # We use "is not None" because we want things to happen with empty strings (or 0 port)
- if scheme is not None:
- url += scheme + '://'
- if auth is not None:
- url += auth + '@'
- if host is not None:
- url += host
- if port is not None:
- url += ':' + str(port)
- if path is not None:
- url += path
- if query is not None:
- url += '?' + query
- if fragment is not None:
- url += '#' + fragment
-
- return url
-
- def __str__(self):
- return self.url
-
-
-def split_first(s, delims):
- """
- Given a string and an iterable of delimiters, split on the first found
- delimiter. Return two split parts and the matched delimiter.
-
- If not found, then the first part is the full input string.
-
- Example::
-
- >>> split_first('foo/bar?baz', '?/=')
- ('foo', 'bar?baz', '/')
- >>> split_first('foo/bar?baz', '123')
- ('foo/bar?baz', '', None)
-
- Scales linearly with number of delims. Not ideal for large number of delims.
- """
- min_idx = None
- min_delim = None
- for d in delims:
- idx = s.find(d)
- if idx < 0:
- continue
-
- if min_idx is None or idx < min_idx:
- min_idx = idx
- min_delim = d
-
- if min_idx is None or min_idx < 0:
- return s, '', None
-
- return s[:min_idx], s[min_idx + 1:], min_delim
-
-
-def parse_url(url):
- """
- Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
- performed to parse incomplete urls. Fields not provided will be None.
-
- Partly backwards-compatible with :mod:`urlparse`.
-
- Example::
-
- >>> parse_url('http://google.com/mail/')
- Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
- >>> parse_url('google.com:80')
- Url(scheme=None, host='google.com', port=80, path=None, ...)
- >>> parse_url('/foo?bar')
- Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
- """
-
- # While this code has overlap with stdlib's urlparse, it is much
- # simplified for our needs and less annoying.
-    # Additionally, this implementation does silly things to be optimal
- # on CPython.
-
- if not url:
- # Empty
- return Url()
-
- scheme = None
- auth = None
- host = None
- port = None
- path = None
- fragment = None
- query = None
-
- # Scheme
- if '://' in url:
- scheme, url = url.split('://', 1)
-
- # Find the earliest Authority Terminator
- # (http://tools.ietf.org/html/rfc3986#section-3.2)
- url, path_, delim = split_first(url, ['/', '?', '#'])
-
- if delim:
- # Reassemble the path
- path = delim + path_
-
- # Auth
- if '@' in url:
- # Last '@' denotes end of auth part
- auth, url = url.rsplit('@', 1)
-
- # IPv6
- if url and url[0] == '[':
- host, url = url.split(']', 1)
- host += ']'
-
- # Port
- if ':' in url:
- _host, port = url.split(':', 1)
-
- if not host:
- host = _host
-
- if port:
- # If given, ports must be integers. No whitespace, no plus or
- # minus prefixes, no non-integer digits such as ^2 (superscript).
- if not port.isdigit():
- raise LocationParseError(url)
- try:
- port = int(port)
- except ValueError:
- raise LocationParseError(url)
- else:
- # Blank ports are cool, too. (rfc3986#section-3.2.3)
- port = None
-
- elif not host and url:
- host = url
-
- if not path:
- return Url(scheme, auth, host, port, path, query, fragment)
-
- # Fragment
- if '#' in path:
- path, fragment = path.split('#', 1)
-
- # Query
- if '?' in path:
- path, query = path.split('?', 1)
-
- return Url(scheme, auth, host, port, path, query, fragment)
-
-
-def get_host(url):
- """
- Deprecated. Use :func:`parse_url` instead.
- """
- p = parse_url(url)
- return p.scheme or 'http', p.hostname, p.port
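
An illustrative round trip through parse_url() and the Url properties defined above:

    url = parse_url('http://user:pass@example.com:8080/mail?folder=inbox#unread')

    print(url.scheme)       # 'http'
    print(url.auth)         # 'user:pass'
    print(url.netloc)       # 'example.com:8080'
    print(url.request_uri)  # '/mail?folder=inbox'
    print(url.url)          # reassembles the original string
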
diff --git a/python.d/python_modules/urllib3/util/wait.py b/python.d/python_modules/urllib3/util/wait.py
deleted file mode 100644
index cb396e508..000000000
--- a/python.d/python_modules/urllib3/util/wait.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from .selectors import (
- HAS_SELECT,
- DefaultSelector,
- EVENT_READ,
- EVENT_WRITE
-)
-
-
-def _wait_for_io_events(socks, events, timeout=None):
- """ Waits for IO events to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be interacted with immediately. """
- if not HAS_SELECT:
- raise ValueError('Platform does not have a selector')
- if not isinstance(socks, list):
- # Probably just a single socket.
- if hasattr(socks, "fileno"):
- socks = [socks]
- # Otherwise it might be a non-list iterable.
- else:
- socks = list(socks)
- with DefaultSelector() as selector:
- for sock in socks:
- selector.register(sock, events)
- return [key[0].fileobj for key in
- selector.select(timeout) if key[1] & events]
-
-
-def wait_for_read(socks, timeout=None):
- """ Waits for reading to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be read from immediately. """
- return _wait_for_io_events(socks, EVENT_READ, timeout)
-
-
-def wait_for_write(socks, timeout=None):
- """ Waits for writing to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be written to immediately. """
- return _wait_for_io_events(socks, EVENT_WRITE, timeout)
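
A minimal sketch of the helpers above, assuming the module is importable as urllib3.util.wait and the platform provides socket.socketpair():

    import socket

    from urllib3.util.wait import wait_for_read

    a, b = socket.socketpair()
    b.sendall(b'ping')                     # makes `a` readable
    print(wait_for_read(a, timeout=1.0))   # [a] -- a single socket is accepted too
    a.close()
    b.close()
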
diff --git a/python.d/rabbitmq.chart.py b/python.d/rabbitmq.chart.py
deleted file mode 100644
index b8847e9f8..000000000
--- a/python.d/rabbitmq.chart.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: rabbitmq netdata python.d module
-# Author: l2isbad
-
-from collections import namedtuple
-from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-
-NODE_STATS = ['fd_used',
- 'mem_used',
- 'sockets_used',
- 'proc_used',
- 'disk_free',
- 'run_queue'
- ]
-OVERVIEW_STATS = ['object_totals.channels',
- 'object_totals.consumers',
- 'object_totals.connections',
- 'object_totals.queues',
- 'object_totals.exchanges',
- 'queue_totals.messages_ready',
- 'queue_totals.messages_unacknowledged',
- 'message_stats.ack',
- 'message_stats.redeliver',
- 'message_stats.deliver',
- 'message_stats.publish'
- ]
-ORDER = ['queued_messages', 'message_rates', 'global_counts',
- 'file_descriptors', 'socket_descriptors', 'erlang_processes', 'erlang_run_queue', 'memory', 'disk_space']
-
-CHARTS = {
- 'file_descriptors': {
- 'options': [None, 'File Descriptors', 'descriptors', 'overview',
- 'rabbitmq.file_descriptors', 'line'],
- 'lines': [
- ['fd_used', 'used', 'absolute']
- ]},
- 'memory': {
- 'options': [None, 'Memory', 'MB', 'overview',
- 'rabbitmq.memory', 'line'],
- 'lines': [
- ['mem_used', 'used', 'absolute', 1, 1024 << 10]
- ]},
- 'disk_space': {
- 'options': [None, 'Disk Space', 'GB', 'overview',
- 'rabbitmq.disk_space', 'line'],
- 'lines': [
- ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
- ]},
- 'socket_descriptors': {
- 'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
- 'rabbitmq.sockets', 'line'],
- 'lines': [
- ['sockets_used', 'used', 'absolute']
- ]},
- 'erlang_processes': {
- 'options': [None, 'Erlang Processes', 'processes', 'overview',
- 'rabbitmq.processes', 'line'],
- 'lines': [
- ['proc_used', 'used', 'absolute']
- ]},
- 'erlang_run_queue': {
- 'options': [None, 'Erlang Run Queue', 'processes', 'overview',
- 'rabbitmq.erlang_run_queue', 'line'],
- 'lines': [
-            ['run_queue', 'length', 'absolute']
- ]},
- 'global_counts': {
- 'options': [None, 'Global Counts', 'counts', 'overview',
- 'rabbitmq.global_counts', 'line'],
- 'lines': [
- ['object_totals_channels', 'channels', 'absolute'],
- ['object_totals_consumers', 'consumers', 'absolute'],
- ['object_totals_connections', 'connections', 'absolute'],
- ['object_totals_queues', 'queues', 'absolute'],
- ['object_totals_exchanges', 'exchanges', 'absolute']
- ]},
- 'queued_messages': {
- 'options': [None, 'Queued Messages', 'messages', 'overview',
- 'rabbitmq.queued_messages', 'stacked'],
- 'lines': [
- ['queue_totals_messages_ready', 'ready', 'absolute'],
- ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
- ]},
- 'message_rates': {
- 'options': [None, 'Message Rates', 'messages/s', 'overview',
- 'rabbitmq.message_rates', 'stacked'],
- 'lines': [
- ['message_stats_ack', 'ack', 'incremental'],
- ['message_stats_redeliver', 'redeliver', 'incremental'],
- ['message_stats_deliver', 'deliver', 'incremental'],
- ['message_stats_publish', 'publish', 'incremental']
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 15672)
- self.scheme = self.configuration.get('scheme', 'http')
-
- def check(self):
-        # We can't start unless both <host> and <port> are specified
- if not (self.host and self.port):
-            self.error('Host or port is not defined in the module configuration file')
- return False
-
- # Hostname -> ip address
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
-
- # Add handlers (auth, self signed cert accept)
- self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
- host=self.host,
- port=self.port)
- # Add methods
- api_node = self.url + '/nodes'
- api_overview = self.url + '/overview'
- self.methods = [METHODS(get_data=self._get_overview_stats,
- url=api_node,
- stats=NODE_STATS),
- METHODS(get_data=self._get_overview_stats,
- url=api_overview,
- stats=OVERVIEW_STATS)]
- return UrlService.check(self)
-
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
-
- for method in self.methods:
- th = Thread(target=method.get_data,
- args=(queue, method.url, method.stats))
- th.start()
- threads.append(th)
-
- for thread in threads:
- thread.join()
- result.update(queue.get())
-
- return result or None
-
- def _get_overview_stats(self, queue, url, stats):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data(url)
-
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- data = data[0] if isinstance(data, list) else data
-
- to_netdata = fetch_data(raw_data=data, metrics=stats)
- return queue.put(to_netdata)
-
-
-def fetch_data(raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError:
- continue
- data['_'.join(metrics_list)] = value
- return data
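
For reference, how fetch_data() flattens the dotted metric paths listed in NODE_STATS and OVERVIEW_STATS (illustrative payload):

    api_reply = {
        'object_totals': {'queues': 12, 'channels': 4},
        'queue_totals': {'messages_ready': 150},
    }
    print(fetch_data(raw_data=api_reply,
                     metrics=['object_totals.queues', 'queue_totals.messages_ready', 'message_stats.ack']))
    # {'object_totals_queues': 12, 'queue_totals_messages_ready': 150}
    # metrics missing from the payload ('message_stats.ack' here) are silently skipped
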
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
deleted file mode 100644
index bcfcf16a6..000000000
--- a/python.d/redis.chart.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: redis netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 6379,
-# 'unix_socket': None
-# }}
-
-ORDER = ['operations', 'hit_rate', 'memory', 'keys', 'net', 'connections', 'clients', 'slaves', 'persistence',
- 'bgsave_now', 'bgsave_health']
-
-CHARTS = {
- 'operations': {
- 'options': [None, 'Redis Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
- 'lines': [
- ['total_commands_processed', 'commands', 'incremental'],
- ['instantaneous_ops_per_sec', 'operations', 'absolute']
- ]},
- 'hit_rate': {
- 'options': [None, 'Redis Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
- 'lines': [
- ['hit_rate', 'rate', 'absolute']
- ]},
- 'memory': {
- 'options': [None, 'Redis Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
- 'lines': [
- ['used_memory', 'total', 'absolute', 1, 1024],
- ['used_memory_lua', 'lua', 'absolute', 1, 1024]
- ]},
- 'net': {
- 'options': [None, 'Redis Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
- 'lines': [
- ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
- ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
- ]},
- 'keys': {
- 'options': [None, 'Redis Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]},
- 'connections': {
- 'options': [None, 'Redis Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
- 'lines': [
- ['total_connections_received', 'received', 'incremental', 1],
- ['rejected_connections', 'rejected', 'incremental', -1]
- ]},
- 'clients': {
- 'options': [None, 'Redis Clients', 'clients', 'connections', 'redis.clients', 'line'],
- 'lines': [
- ['connected_clients', 'connected', 'absolute', 1],
- ['blocked_clients', 'blocked', 'absolute', -1]
- ]},
- 'slaves': {
- 'options': [None, 'Redis Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
- 'lines': [
- ['connected_slaves', 'connected', 'absolute']
- ]},
- 'persistence': {
- 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence',
- 'redis.rdb_changes', 'line'],
- 'lines': [
- ['rdb_changes_since_last_save', 'changes', 'absolute']
- ]},
- 'bgsave_now': {
- 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
- 'redis.bgsave_now', 'absolute'],
- 'lines': [
- ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
- ]},
- 'bgsave_health': {
- 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
- 'redis.bgsave_health', 'line'],
- 'lines': [
- ['rdb_last_bgsave_status', 'rdb save', 'absolute']
- ]}
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self._keep_alive = True
- self.chart_name = ""
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 6379)
- self.unix_socket = self.configuration.get('socket')
- password = self.configuration.get('pass', str())
- self.bgsave_time = 0
- self.requests = dict(request='INFO\r\n'.encode(),
- password=' '.join(['AUTH', password, '\r\n']).encode() if password else None)
- self.request = self.requests['request']
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- if self.requests['password']:
- self.request = self.requests['password']
- raw = self._get_raw_data().strip()
- if raw != "+OK":
- self.error("invalid password")
- return None
- self.request = self.requests['request']
- response = self._get_raw_data()
- if response is None:
- # error has already been logged
- return None
-
- try:
- parsed = response.split("\n")
- except AttributeError:
- self.error("response is invalid/empty")
- return None
-
- data = dict()
- for line in parsed:
- if len(line) < 5 or line[0] == '$' or line[0] == '#':
- continue
-
- if line.startswith('db'):
- tmp = line.split(',')[0].replace('keys=', '')
- record = tmp.split(':')
- data[record[0]] = record[1]
- continue
-
- try:
- t = line.split(':')
- data[t[0]] = t[1]
- except (IndexError, ValueError):
- self.debug("invalid line received: " + str(line))
-
- if not data:
- self.error("received data doesn't have any records")
- return None
-
- try:
- data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits'])
- + int(data['keyspace_misses']))
- except (KeyError, ZeroDivisionError, TypeError):
- data['hit_rate'] = 0
-
- if data['rdb_bgsave_in_progress'] != '0\r':
- self.bgsave_time += self.update_every
- else:
- self.bgsave_time = 0
-
- data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok\r' else 1
- data['rdb_bgsave_in_progress'] = self.bgsave_time
-
- return data
-
- def _check_raw_data(self, data):
- """
- Check if all data has been gathered from socket.
- Parse first line containing message length and check against received message
- :param data: str
- :return: boolean
- """
- length = len(data)
- supposed = data.split('\n')[0][1:-1]
-        offset = len(supposed) + 4  # 1 dollar sign, 1 new line character + 1 ending sequence '\r\n'
- if not supposed.isdigit():
- return True
- supposed = int(supposed)
-
- if length - offset >= supposed:
- self.debug("received full response from redis")
- return True
-
- self.debug("waiting more data from redis")
- return False
-
- def check(self):
- """
- Parse configuration, check if redis is available, and dynamically create chart lines data
- :return: boolean
- """
- data = self._get_data()
- if data is None:
- return False
-
- for name in data:
- if name.startswith('db'):
- self.definitions['keys']['lines'].append([name, None, 'absolute'])
- return True
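
A standalone illustration of how _get_data() turns INFO lines into dimensions and derives hit_rate (sample values):

    info_lines = ['keyspace_hits:980', 'keyspace_misses:20', 'db0:keys=7,expires=0']

    data = {}
    for line in info_lines:
        if line.startswith('db'):
            key, keys_count = line.split(',')[0].replace('keys=', '').split(':')
            data[key] = keys_count
        else:
            key, value = line.split(':')
            data[key] = value

    data['hit_rate'] = int(data['keyspace_hits']) * 100 / (int(data['keyspace_hits'])
                                                           + int(data['keyspace_misses']))
    print(data['db0'], data['hit_rate'])   # 7 98.0 (98 under Python 2 integer division)
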
diff --git a/python.d/retroshare.chart.py b/python.d/retroshare.chart.py
deleted file mode 100644
index 8c0330ec6..000000000
--- a/python.d/retroshare.chart.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: RetroShare netdata python.d module
-# Authors: sehraf
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'dht']
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
- 'lines': [
- ['bandwidth_up_kb', 'Upload'],
- ['bandwidth_down_kb', 'Download']
- ]},
- 'peers': {
- 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
- 'lines': [
- ['peers_all', 'All friends'],
- ['peers_connected', 'Connected friends']
- ]},
- 'dht': {
- 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
- 'lines': [
- ['dht_size_all', 'DHT nodes estimated'],
- ['dht_size_rs', 'RS nodes estimated']
- ]}
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:9090')
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_stats(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- parsed = json.loads(raw)
- if str(parsed['returncode']) != 'ok':
- return None
- except (TypeError, ValueError):
- return None
-
- return parsed['data'][0]
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- self.url = self.baseurl + '/api/v2/stats'
- data = self._get_stats()
- if data is None:
- return None
-
- data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
- if data['dht_active'] is False:
- data['dht_size_all'] = None
- data['dht_size_rs'] = None
-
- return data
diff --git a/python.d/samba.chart.py b/python.d/samba.chart.py
deleted file mode 100644
index 3f4fd5a12..000000000
--- a/python.d/samba.chart.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: samba netdata python.d module
-# Author: Christopher Cox <chris_cox@endlessnow.com>
-#
-# The netdata user needs to be able to sudo the smbstatus program
-# without password:
-# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-#
-# This makes calls to smbstatus -P
-#
-# This just looks at a couple of values out of syscall, and some from smb2.
-#
-# The Lesser Ops chart is merely a display of current counter values. They
-# didn't seem to change much to me. However, if you notice something changing
-# a lot there, bring one or more out into its own chart and make it incremental
-# (like find and notify... good examples).
-
-import re
-
-from bases.collection import find_binary
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 60
-
-ORDER = ['syscall_rw', 'smb2_rw', 'smb2_create_close', 'smb2_info', 'smb2_find', 'smb2_notify', 'smb2_sm_count']
-
-CHARTS = {
- 'syscall_rw': {
- 'lines': [
- ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
- ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
- ],
- 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area']
- },
- 'smb2_rw': {
- 'lines': [
- ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
- ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
- ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
- ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
- ],
- 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area']
- },
- 'smb2_create_close': {
- 'lines': [
- ['smb2_create_count', 'create', 'incremental', 1, 1],
- ['smb2_close_count', 'close', 'incremental', -1, 1]
- ],
- 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line']
- },
- 'smb2_info': {
- 'lines': [
- ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
- ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
- ],
- 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line']
- },
- 'smb2_find': {
- 'lines': [
- ['smb2_find_count', 'find', 'incremental', 1, 1]
- ],
- 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line']
- },
- 'smb2_notify': {
- 'lines': [
- ['smb2_notify_count', 'notify', 'incremental', 1, 1]
- ],
- 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line']
- },
- 'smb2_sm_count': {
- 'lines': [
- ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
- ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
- ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
- ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
- ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
- ['smb2_flush_count', 'flush', 'absolute', 1, 1],
- ['smb2_lock_count', 'lock', 'absolute', 1, 1],
- ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
- ['smb2_break_count', 'break', 'absolute', 1, 1],
- ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
- ],
- 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked']
- }
- }
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
-
- def check(self):
- sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
-
- if not (sudo_binary and smbstatus_binary):
- self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
- return False
-
- self.command = [sudo_binary, '-v']
- err = self._get_raw_data(stderr=True)
- if err:
- self.error(''.join(err))
- return False
-
- self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
-
- return ExecutableService.check(self)
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- parsed = self.rgx_smb2.findall(' '.join(raw_data))
-
- return dict(parsed) or None
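
An illustrative run of the regex above against text shaped like smbstatus -P output (the field names and spacing here are assumptions):

    import re

    rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
    sample = 'syscall_sendfile_bytes:    4096  smb2_create_count:    7'
    print(dict(rgx_smb2.findall(sample)))
    # {'syscall_sendfile_bytes': '4096', 'smb2_create_count': '7'}
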
diff --git a/python.d/sensors.chart.py b/python.d/sensors.chart.py
deleted file mode 100644
index 06e420b68..000000000
--- a/python.d/sensors.chart.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: sensors netdata python.d plugin
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import lm_sensors as sensors
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-
-ORDER = ['temperature', 'fan', 'voltage', 'current', 'power', 'energy', 'humidity']
-
-# This is a prototype of chart definition which is used to dynamically create self.definitions
-CHARTS = {
- 'temperature': {
- 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]},
- 'voltage': {
- 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]},
- 'current': {
- 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]},
- 'power': {
- 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000000]
- ]},
- 'fan': {
- 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]},
- 'energy': {
- 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'],
- 'lines': [
- [None, None, 'incremental', 1, 1000000]
- ]},
- 'humidity': {
- 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]}
-}
-
-LIMITS = {
- 'temperature': [-127, 1000],
- 'voltage': [-127, 127],
- 'current': [-127, 127],
- 'fan': [0, 65535]
-}
-
-TYPE_MAP = {
- 0: 'voltage',
- 1: 'fan',
- 2: 'temperature',
- 3: 'power',
- 4: 'energy',
- 5: 'current',
- 6: 'humidity',
- 7: 'max_main',
- 16: 'vid',
- 17: 'intrusion',
- 18: 'max_other',
- 24: 'beep_enable'
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.chips = list()
-
- def get_data(self):
- data = dict()
- try:
- for chip in sensors.ChipIterator():
- prefix = sensors.chip_snprintf_name(chip)
- for feature in sensors.FeatureIterator(chip):
- sfi = sensors.SubFeatureIterator(chip, feature)
- for sf in sfi:
- val = sensors.get_value(chip, sf.number)
- break
- type_name = TYPE_MAP[feature.type]
- if type_name in LIMITS:
- limit = LIMITS[type_name]
- if val < limit[0] or val > limit[1]:
- continue
- data[prefix + "_" + str(feature.name.decode())] = int(val * 1000)
- except Exception as error:
- self.error(error)
- return None
-
- return data or None
-
- def create_definitions(self):
- for sensor in ORDER:
- for chip in sensors.ChipIterator():
- chip_name = sensors.chip_snprintf_name(chip)
- if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
- continue
- for feature in sensors.FeatureIterator(chip):
- sfi = sensors.SubFeatureIterator(chip, feature)
- vals = [sensors.get_value(chip, sf.number) for sf in sfi]
- if vals[0] == 0:
- continue
- if TYPE_MAP[feature.type] == sensor:
- # create chart
- name = chip_name + "_" + TYPE_MAP[feature.type]
- if name not in self.order:
- self.order.append(name)
- chart_def = list(CHARTS[sensor]['options'])
- chart_def[1] = chip_name + chart_def[1]
- self.definitions[name] = {'options': chart_def}
- self.definitions[name]['lines'] = []
- line = list(CHARTS[sensor]['lines'][0])
- line[0] = chip_name + "_" + str(feature.name.decode())
- line[1] = sensors.get_label(chip, feature)
- self.definitions[name]['lines'].append(line)
-
- def check(self):
- try:
- sensors.init()
- except Exception as error:
- self.error(error)
- return False
-
- self.create_definitions()
-
- return True
-
diff --git a/python.d/smartd_log.chart.py b/python.d/smartd_log.chart.py
deleted file mode 100644
index 07ad88cd4..000000000
--- a/python.d/smartd_log.chart.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: smart netdata python.d module
-# Author: l2isbad, vorph1
-
-import os
-import re
-
-from collections import namedtuple
-from time import time
-
-from bases.collection import read_last_line
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']
-
-SMART_ATTR = {
- '1': 'Read Error Rate',
- '2': 'Throughput Performance',
- '3': 'Spin-Up Time',
- '4': 'Start/Stop Count',
- '5': 'Reallocated Sectors Count',
- '6': 'Read Channel Margin',
- '7': 'Seek Error Rate',
- '8': 'Seek Time Performance',
- '9': 'Power-On Hours Count',
- '10': 'Spin-up Retries',
- '11': 'Calibration Retries',
- '12': 'Power Cycle Count',
- '13': 'Soft Read Error Rate',
- '100': 'Erase/Program Cycles',
- '103': 'Translation Table Rebuild',
- '108': 'Unknown (108)',
- '170': 'Reserved Block Count',
- '171': 'Program Fail Count',
- '172': 'Erase Fail Count',
- '173': 'Wear Leveller Worst Case Erase Count',
- '174': 'Unexpected Power Loss',
- '175': 'Program Fail Count',
- '176': 'Erase Fail Count',
- '177': 'Wear Leveling Count',
- '178': 'Used Reserved Block Count',
- '179': 'Used Reserved Block Count',
- '180': 'Unused Reserved Block Count',
- '181': 'Program Fail Count',
- '182': 'Erase Fail Count',
- '183': 'SATA Downshifts',
- '184': 'End-to-End error',
- '185': 'Head Stability',
- '186': 'Induced Op-Vibration Detection',
- '187': 'Reported Uncorrectable Errors',
- '188': 'Command Timeout',
- '189': 'High Fly Writes',
- '190': 'Temperature',
- '191': 'G-Sense Errors',
- '192': 'Power-Off Retract Cycles',
- '193': 'Load/Unload Cycles',
- '194': 'Temperature',
- '195': 'Hardware ECC Recovered',
- '196': 'Reallocation Events',
- '197': 'Current Pending Sectors',
- '198': 'Off-line Uncorrectable',
- '199': 'UDMA CRC Error Rate',
- '200': 'Write Error Rate',
- '201': 'Soft Read Errors',
- '202': 'Data Address Mark Errors',
- '203': 'Run Out Cancel',
- '204': 'Soft ECC Corrections',
- '205': 'Thermal Asperity Rate',
- '206': 'Flying Height',
- '207': 'Spin High Current',
- '209': 'Offline Seek Performance',
- '220': 'Disk Shift',
- '221': 'G-Sense Error Rate',
- '222': 'Loaded Hours',
- '223': 'Load/Unload Retries',
- '224': 'Load Friction',
- '225': 'Load/Unload Cycles',
- '226': 'Load-in Time',
- '227': 'Torque Amplification Count',
- '228': 'Power-Off Retracts',
- '230': 'GMR Head Amplitude',
- '231': 'Temperature',
- '232': 'Available Reserved Space',
- '233': 'Media Wearout Indicator',
- '240': 'Head Flying Hours',
- '241': 'Total LBAs Written',
- '242': 'Total LBAs Read',
- '250': 'Read Error Retry Rate'
-}
-
-LIMIT = namedtuple('LIMIT', ['min', 'max'])
-
-LIMITS = {
- '194': LIMIT(0, 200)
-}
-
-RESCAN_INTERVAL = 60
-
-REGEX = re.compile(
-    r'(\d+);'  # attribute
-    r'(\d+);'  # normalized value
-    r'(\d+)',  # raw value
- re.X
-)
-
-
-def chart_template(chart_name):
- units, attr_id = chart_name.split('_')[-2:]
- title = '{value_type} {description}'.format(value_type=units.capitalize(),
- description=SMART_ATTR[attr_id])
- family = SMART_ATTR[attr_id].lower()
-
- return {
- chart_name: {
- 'options': [None, title, units, family, 'smartd_log.' + chart_name, 'line'],
- 'lines': []
- }
- }
-
-
-def handle_os_error(method):
- def on_call(*args):
- try:
- return method(*args)
- except OSError:
- return None
- return on_call
-
-
-class SmartAttribute(object):
- def __init__(self, idx, normalized, raw):
- self.id = idx
- self.normalized = normalized
- self._raw = raw
-
- @property
- def raw(self):
- if self.id in LIMITS:
- limit = LIMITS[self.id]
- if limit.min <= int(self._raw) <= limit.max:
- return self._raw
- return None
- return self._raw
-
- @raw.setter
- def raw(self, value):
- self._raw = value
-
-
-class DiskLogFile:
- def __init__(self, path):
- self.path = path
- self.size = os.path.getsize(path)
-
- @handle_os_error
- def is_changed(self):
- new_size = os.path.getsize(self.path)
- old_size, self.size = self.size, new_size
-
- return new_size != old_size and new_size
-
- @staticmethod
- @handle_os_error
- def is_valid(log_file, exclude):
- return all([log_file.endswith('.csv'),
- not [p for p in exclude if p in log_file],
- os.access(log_file, os.R_OK),
- os.path.getsize(log_file)])
-
-
-class Disk:
- def __init__(self, full_path, age):
- self.log_file = DiskLogFile(full_path)
- self.name = os.path.basename(full_path).split('.')[-3]
- self.age = int(age)
- self.status = True
- self.attributes = dict()
-
- self.get_attributes()
-
- def __eq__(self, other):
- if isinstance(other, Disk):
- return self.name == other.name
- return self.name == other
-
- @handle_os_error
- def is_active(self):
- return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age
-
- @handle_os_error
- def get_attributes(self):
- last_line = read_last_line(self.log_file.path)
- self.attributes = dict((attr, SmartAttribute(attr, normalized, raw)) for attr, normalized, raw
- in REGEX.findall(last_line))
- return True
-
- def data(self):
- data = dict()
- for attr in self.attributes.values():
- data['_'.join([self.name, 'normalized', attr.id])] = attr.normalized
- if attr.raw is not None:
- data['_'.join([self.name, 'raw', attr.id])] = attr.raw
- return data
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.log_path = self.configuration.get('log_path', '/var/log/smartd')
- self.raw = self.configuration.get('raw_values', True)
- self.exclude = self.configuration.get('exclude_disks', str()).split()
- self.age = self.configuration.get('age', 30)
-
- self.runs = 0
- self.disks = list()
- self.order = list()
- self.definitions = dict()
-
- def check(self):
- self.disks = self.scan()
-
- if not self.disks:
- return None
-
- user_defined_sa = self.configuration.get('smart_attributes')
-
- if user_defined_sa:
- order = user_defined_sa.split() or ORDER
- else:
- order = ORDER
-
- self.create_charts(order)
-
- return True
-
- def get_data(self):
- self.runs += 1
-
- if self.runs % RESCAN_INTERVAL == 0:
- self.cleanup_and_rescan()
-
- data = dict()
-
- for disk in self.disks:
-
- if not disk.status:
- continue
-
- changed = disk.log_file.is_changed()
-
- # True = changed, False = unchanged, None = Exception
- if changed is None:
- disk.status = False
- continue
-
- if changed:
- success = disk.get_attributes()
- if not success:
- disk.status = False
- continue
-
- data.update(disk.data())
-
- return data or None
-
- def create_charts(self, order):
- for attr in order:
- raw_name, normalized_name = 'attr_id_raw_' + attr, 'attr_id_normalized_' + attr
- raw, normalized = chart_template(raw_name), chart_template(normalized_name)
- self.order.extend([normalized_name, raw_name])
- self.definitions.update(raw)
- self.definitions.update(normalized)
-
- for disk in self.disks:
- if attr not in disk.attributes:
- self.debug("'{disk}' has no attribute '{attr_id}'".format(disk=disk.name,
- attr_id=attr))
- continue
- normalized[normalized_name]['lines'].append(['_'.join([disk.name, 'normalized', attr]), disk.name])
-
- if not self.raw:
- continue
-
- if disk.attributes[attr].raw is not None:
- raw[raw_name]['lines'].append(['_'.join([disk.name, 'raw', attr]), disk.name])
- continue
- self.debug("'{disk}' attribute '{attr_id}' value not in {limits}".format(disk=disk.name,
- attr_id=attr,
- limits=LIMITS[attr]))
-
- def cleanup_and_rescan(self):
- self.cleanup()
- new_disks = self.scan(only_new=True)
-
- for disk in new_disks:
- valid = False
-
- for chart in self.charts:
- value_type, idx = chart.id.split('_')[2:]
-
- if idx in disk.attributes:
- valid = True
- dimension_id = '_'.join([disk.name, value_type, idx])
-
- if dimension_id in chart:
- chart.hide_dimension(dimension_id=dimension_id, reverse=True)
- else:
- chart.add_dimension([dimension_id, disk.name])
- if valid:
- self.disks.append(disk)
-
- def cleanup(self):
- for disk in self.disks:
-
- if not disk.is_active():
- disk.status = False
- if not disk.status:
- for chart in self.charts:
- dimension_id = '_'.join([disk.name, chart.id[8:]])
- chart.hide_dimension(dimension_id=dimension_id)
-
- self.disks = [disk for disk in self.disks if disk.status]
-
- def scan(self, only_new=None):
- new_disks = list()
- for f in os.listdir(self.log_path):
- full_path = os.path.join(self.log_path, f)
-
- if DiskLogFile.is_valid(full_path, self.exclude):
- disk = Disk(full_path, self.age)
-
- active = disk.is_active()
- if active is None:
- continue
- if active:
- if not only_new:
- new_disks.append(disk)
- else:
- if disk not in self.disks:
- new_disks.append(disk)
- else:
- if not only_new:
- self.debug("'{disk}' not updated in the last {age} minutes, "
- "skipping it.".format(disk=disk.name, age=self.age))
- return new_disks
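
For reference, the chart skeleton produced by chart_template() for a single attribute id (194, Temperature):

    print(chart_template('attr_id_raw_194'))
    # {'attr_id_raw_194': {
    #     'options': [None, 'Raw Temperature', 'raw', 'temperature', 'smartd_log.attr_id_raw_194', 'line'],
    #     'lines': []}}
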
diff --git a/python.d/springboot.chart.py b/python.d/springboot.chart.py
deleted file mode 100644
index 60ad0cccb..000000000
--- a/python.d/springboot.chart.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: springboot netdata python.d module
-# Author: Wing924
-
-import json
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-
-DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
-
-DEFAULT_CHARTS = {
- 'response_code': {
- 'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
- 'lines': [
- ["resp_other", 'Other', 'incremental'],
- ["resp_1xx", '1xx', 'incremental'],
- ["resp_2xx", '2xx', 'incremental'],
- ["resp_3xx", '3xx', 'incremental'],
- ["resp_4xx", '4xx', 'incremental'],
- ["resp_5xx", '5xx', 'incremental'],
- ]},
- 'threads': {
- 'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
- 'lines': [
- ["threads_daemon", 'daemon', 'absolute'],
- ["threads", 'total', 'absolute'],
- ]},
- 'gc_time': {
- 'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
- 'lines': [
- ["gc_copy_time", 'Copy', 'incremental'],
- ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_time", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
- ]},
- 'gc_ope': {
- 'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
- 'lines': [
- ["gc_copy_count", 'Copy', 'incremental'],
- ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_count", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
- ]},
- 'heap': {
- 'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
- 'lines': [
- ["heap_committed", 'committed', "absolute"],
- ["heap_used", 'used', "absolute"],
- ]},
-}
-
-class ExtraChartError(ValueError):
- pass
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', "http://localhost:8080/metrics")
- self._setup_charts()
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- data = json.loads(raw_data)
- except ValueError:
-            self.debug('%s is not a valid JSON page' % self.url)
- return None
-
- result = {
- 'resp_1xx': 0,
- 'resp_2xx': 0,
- 'resp_3xx': 0,
- 'resp_4xx': 0,
- 'resp_5xx': 0,
- 'resp_other': 0,
- }
-
-        for key, value in data.items():
- if 'counter.status.' in key:
- status_type = key[15:16] + 'xx'
- if status_type[0] not in '12345':
- status_type = 'other'
- result['resp_' + status_type] += value
- else:
- result[key.replace('.', '_')] = value
-
- return result or None
-
- def _setup_charts(self):
- self.order = []
- self.definitions = {}
- defaults = self.configuration.get('defaults', {})
-
- for chart in DEFAULT_ORDER:
- if defaults.get(chart, True):
- self.order.append(chart)
- self.definitions[chart] = DEFAULT_CHARTS[chart]
-
- for extra in self.configuration.get('extras', []):
- self._add_extra_chart(extra)
- self.order.append(extra['id'])
-
- def _add_extra_chart(self, chart):
-        chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
-        options = chart.get('options', None) or self.die('options is not defined in extra chart: %s' % chart_id)
-        lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
-
-        title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
-        units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
- family = options.get('family', title)
- context = options.get('context', 'springboot.' + title)
- charttype = options.get('charttype', 'line')
-
- result = {
- 'options': [None, title, units, family, context, charttype],
- 'lines': [],
- }
-
- for line in lines:
-            dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
- name = line.get('name', dimension)
- algorithm = line.get('algorithm', 'absolute')
- multiplier = line.get('multiplier', 1)
- divisor = line.get('divisor', 1)
- result['lines'].append([dimension, name, algorithm, multiplier, divisor])
-
- self.definitions[chart_id] = result
-
- @staticmethod
- def die(error_message):
- raise ExtraChartError(error_message)
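
How _get_data() buckets the Spring Boot counter.status.* keys, shown standalone with made-up metric names:

    metrics = {'counter.status.200.root': 120,
               'counter.status.404.star-star': 3,
               'gauge.response.root': 5.0}

    result = {'resp_1xx': 0, 'resp_2xx': 0, 'resp_3xx': 0,
              'resp_4xx': 0, 'resp_5xx': 0, 'resp_other': 0}
    for key, value in metrics.items():
        if 'counter.status.' in key:
            status_type = key[15:16] + 'xx'       # first digit of the HTTP status code
            if status_type[0] not in '12345':
                status_type = 'other'
            result['resp_' + status_type] += value
        else:
            result[key.replace('.', '_')] = value

    print(result['resp_2xx'], result['resp_4xx'], result['gauge_response_root'])   # 120 3 5.0
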
diff --git a/python.d/squid.chart.py b/python.d/squid.chart.py
deleted file mode 100644
index ba8f982ff..000000000
--- a/python.d/squid.chart.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: squid netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
-
-CHARTS = {
- 'clients_net': {
- 'options': [None, "Squid Client Bandwidth", "kilobits/s", "clients", "squid.clients_net", "area"],
- 'lines': [
- ["client_http_kbytes_in", "in", "incremental", 8, 1],
- ["client_http_kbytes_out", "out", "incremental", -8, 1],
- ["client_http_hit_kbytes_out", "hits", "incremental", -8, 1]
- ]},
- 'clients_requests': {
- 'options': [None, "Squid Client Requests", "requests/s", "clients", "squid.clients_requests", 'line'],
- 'lines': [
- ["client_http_requests", "requests", "incremental"],
- ["client_http_hits", "hits", "incremental"],
- ["client_http_errors", "errors", "incremental", -1, 1]
- ]},
- 'servers_net': {
- 'options': [None, "Squid Server Bandwidth", "kilobits/s", "servers", "squid.servers_net", "area"],
- 'lines': [
- ["server_all_kbytes_in", "in", "incremental", 8, 1],
- ["server_all_kbytes_out", "out", "incremental", -8, 1]
- ]},
- 'servers_requests': {
- 'options': [None, "Squid Server Requests", "requests/s", "servers", "squid.servers_requests", 'line'],
- 'lines': [
- ["server_all_requests", "requests", "incremental"],
- ["server_all_errors", "errors", "incremental", -1, 1]
- ]}
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self._keep_alive = True
- self.request = ""
- self.host = "localhost"
- self.port = 3128
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Get data via http request
- :return: dict
- """
- response = self._get_raw_data()
-
- data = dict()
- try:
- raw = ""
- for tmp in response.split('\r\n'):
- if tmp.startswith("sample_time"):
- raw = tmp
- break
-
- if raw.startswith('<'):
- self.error("invalid data received")
- return None
-
- for row in raw.split('\n'):
- if row.startswith(("client", "server.all")):
- tmp = row.split("=")
- data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
-
- except (ValueError, AttributeError, TypeError):
- self.error("invalid data received")
- return None
-
- if not data:
- self.error("no data received")
- return None
- return data
-
- def _check_raw_data(self, data):
- header = data[:1024].lower()
-
- if "connection: keep-alive" in header:
- self._keep_alive = True
- else:
- self._keep_alive = False
-
- if data[-7:] == "\r\n0\r\n\r\n" and "transfer-encoding: chunked" in header: # HTTP/1.1 response
- self.debug("received full response from squid")
- return True
-
- self.debug("waiting more data from squid")
- return False
-
- def check(self):
- """
- Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
- :return: boolean
- """
- self._parse_config()
- # format request
- req = self.request.decode()
- if not req.startswith("GET"):
- req = "GET " + req
- if not req.endswith(" HTTP/1.1\r\n\r\n"):
- req += " HTTP/1.1\r\n\r\n"
- self.request = req.encode()
- if self._get_data() is not None:
- return True
- else:
- return False
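
A standalone look at how _get_data() reads the Squid cache-manager counters (sample rows):

    rows = ['client_http.requests = 43425',
            'client_http.hits = 20550',
            'server.all.kbytes_in = 312']

    data = {}
    for row in rows:
        if row.startswith(('client', 'server.all')):
            key, value = row.split('=')
            data[key.replace('.', '_').strip(' ')] = int(value)

    print(data)
    # {'client_http_requests': 43425, 'client_http_hits': 20550, 'server_all_kbytes_in': 312}
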
diff --git a/python.d/tomcat.chart.py b/python.d/tomcat.chart.py
deleted file mode 100644
index a570d5643..000000000
--- a/python.d/tomcat.chart.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tomcat netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-import xml.etree.ElementTree as ET
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
-
-CHARTS = {
- 'accesses': {
- 'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"],
- 'lines': [
- ["requestCount", 'accesses', 'incremental'],
- ["errorCount", 'errors', 'incremental'],
- ]},
- 'bandwidth': {
- 'options': [None, "Bandwidth", "KB/s", "statistics", "tomcat.bandwidth", "area"],
- 'lines': [
- ["bytesSent", 'sent', 'incremental', 1, 1024],
- ["bytesReceived", 'received', 'incremental', 1, 1024],
- ]},
- 'processing_time': {
- 'options': [None, "processing time", "seconds", "statistics", "tomcat.processing_time", "area"],
- 'lines': [
- ["processingTime", 'processing time', 'incremental', 1, 1000]
- ]},
- 'threads': {
- 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "area"],
- 'lines': [
- ["currentThreadCount", 'current', "absolute"],
- ["currentThreadsBusy", 'busy', "absolute"]
- ]},
- 'jvm': {
- 'options': [None, "JVM Memory Pool Usage", "MB", "memory", "tomcat.jvm", "stacked"],
- 'lines': [
- ["free", 'free', "absolute", 1, 1048576],
- ["eden_used", 'eden', "absolute", 1, 1048576],
- ["survivor_used", 'survivor', "absolute", 1, 1048576],
- ["tenured_used", 'tenured', "absolute", 1, 1048576],
- ["code_cache_used", 'code cache', "absolute", 1, 1048576],
- ["compressed_used", 'compressed', "absolute", 1, 1048576],
- ["metaspace_used", 'metaspace', "absolute", 1, 1048576],
- ]},
- 'jvm_eden': {
- 'options': [None, "Eden Memory Usage", "MB", "memory", "tomcat.jvm_eden", "area"],
- 'lines': [
- ["eden_used", 'used', "absolute", 1, 1048576],
- ["eden_commited", 'commited', "absolute", 1, 1048576],
- ["eden_max", 'max', "absolute", 1, 1048576]
- ]},
- 'jvm_survivor': {
- 'options': [None, "Survivor Memory Usage", "MB", "memory", "tomcat.jvm_survivor", "area"],
- 'lines': [
- ["survivor_used", 'used', "absolute", 1, 1048576],
- ["survivor_commited", 'commited', "absolute", 1, 1048576],
- ["survivor_max", 'max', "absolute", 1, 1048576]
- ]},
- 'jvm_tenured': {
- 'options': [None, "Tenured Memory Usage", "MB", "memory", "tomcat.jvm_tenured", "area"],
- 'lines': [
- ["tenured_used", 'used', "absolute", 1, 1048576],
- ["tenured_commited", 'commited', "absolute", 1, 1048576],
- ["tenured_max", 'max', "absolute", 1, 1048576]
- ]},
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
- self.connector_name = self.configuration.get('connector_name', None)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = None
- raw_data = self._get_raw_data()
- if raw_data:
- try:
- xml = ET.fromstring(raw_data)
- except ET.ParseError:
-                self.debug('%s is not a valid XML page. Please add "?XML=true" to tomcat status page.' % self.url)
- return None
- data = {}
-
- jvm = xml.find('jvm')
-
- connector = None
- if self.connector_name:
- for conn in xml.findall('connector'):
- if self.connector_name in conn.get('name'):
- connector = conn
- break
- else:
- connector = xml.find('connector')
-
- memory = jvm.find('memory')
- data['free'] = memory.get('free')
- data['total'] = memory.get('total')
-
- for pool in jvm.findall('memorypool'):
- name = pool.get('name')
- if 'Eden Space' in name:
- data['eden_used'] = pool.get('usageUsed')
- data['eden_commited'] = pool.get('usageCommitted')
- data['eden_max'] = pool.get('usageMax')
- elif 'Survivor Space' in name:
- data['survivor_used'] = pool.get('usageUsed')
- data['survivor_commited'] = pool.get('usageCommitted')
- data['survivor_max'] = pool.get('usageMax')
- elif 'Tenured Gen' in name or 'Old Gen' in name:
- data['tenured_used'] = pool.get('usageUsed')
- data['tenured_commited'] = pool.get('usageCommitted')
- data['tenured_max'] = pool.get('usageMax')
- elif name == 'Code Cache':
- data['code_cache_used'] = pool.get('usageUsed')
- data['code_cache_commited'] = pool.get('usageCommitted')
- data['code_cache_max'] = pool.get('usageMax')
- elif name == 'Compressed':
- data['compressed_used'] = pool.get('usageUsed')
- data['compressed_commited'] = pool.get('usageCommitted')
- data['compressed_max'] = pool.get('usageMax')
- elif name == 'Metaspace':
- data['metaspace_used'] = pool.get('usageUsed')
- data['metaspace_commited'] = pool.get('usageCommitted')
- data['metaspace_max'] = pool.get('usageMax')
-
- if connector:
- thread_info = connector.find('threadInfo')
- data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
- data['currentThreadCount'] = thread_info.get('currentThreadCount')
-
- request_info = connector.find('requestInfo')
- data['processingTime'] = request_info.get('processingTime')
- data['requestCount'] = request_info.get('requestCount')
- data['errorCount'] = request_info.get('errorCount')
- data['bytesReceived'] = request_info.get('bytesReceived')
- data['bytesSent'] = request_info.get('bytesSent')
-
- return data or None
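
A minimal, illustrative slice of the manager status XML and how _get_data() reads it (the real page carries many more pools, connectors and attributes):

    import xml.etree.ElementTree as ET

    raw = ('<status>'
           '<jvm><memory free="1048576" total="4194304"/></jvm>'
           '<connector name="http-nio-8080">'
           '<threadInfo currentThreadCount="10" currentThreadsBusy="2"/>'
           '<requestInfo requestCount="120" errorCount="3" processingTime="560"'
           ' bytesReceived="1024" bytesSent="2048"/>'
           '</connector>'
           '</status>')

    xml = ET.fromstring(raw)
    print(xml.find('jvm').find('memory').get('free'))                      # '1048576'
    print(xml.find('connector').find('requestInfo').get('requestCount'))   # '120'
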
diff --git a/python.d/traefik.chart.py b/python.d/traefik.chart.py
deleted file mode 100644
index f7c3e223b..000000000
--- a/python.d/traefik.chart.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: traefik netdata python.d module
-# Author: Alexandre Menezes (@ale_menezes)
-
-from json import loads
-from collections import defaultdict
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 10
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'response_statuses',
- 'response_codes',
- 'detailed_response_codes',
- 'requests',
- 'total_response_time',
- 'average_response_time',
- 'average_response_time_per_iteration',
- 'uptime'
-]
-
-CHARTS = {
- 'response_statuses': {
- 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental'],
- ['server_errors', 'error', 'incremental'],
- ['redirects', 'redirect', 'incremental'],
- ['bad_requests', 'bad', 'incremental'],
- ['other_requests', 'other', 'incremental']
- ]},
- 'response_codes': {
- 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['other', None, 'incremental']
- ]},
- 'detailed_response_codes': {
- 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
- 'lines': [
- ['total_count', 'requests', 'incremental']
- ]},
- 'total_response_time': {
- 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
- 'lines': [
- ['total_response_time_sec', 'response', 'absolute', 1, 10000]
- ]},
- 'average_response_time': {
- 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
- 'lines': [
- ['average_response_time_sec', 'response', 'absolute', 1, 1000]
- ]},
- 'average_response_time_per_iteration': {
- 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings', 'traefik.average_response_time_per_iteration', 'line'],
- 'lines': [
- ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
- ]},
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
- 'lines': [
- ['uptime_sec', 'uptime', 'absolute']
- ]}
- }
-
-HEALTH_STATS = [
- 'uptime_sec',
- 'average_response_time_sec',
- 'total_response_time_sec',
- 'total_count',
- 'total_status_code_count'
-]
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost:8080/health')
- self.order = ORDER
- self.definitions = CHARTS
- self.data = {
- 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
- 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
- '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
- 'average_response_time_per_iteration_sec': 0
- }
- self.last_total_response_time = 0
- self.last_total_count = 0
-
- def _get_data(self):
- data = self._get_raw_data()
-
- if not data:
- return None
-
- data = loads(data)
-
- self.get_data_per_code_status(raw_data=data)
-
- self.get_data_per_code_family(raw_data=data)
-
- self.get_data_per_code(raw_data=data)
-
- self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
-
- self.data['average_response_time_sec'] *= 1000000
- self.data['total_response_time_sec'] *= 10000
- if data['total_count'] != self.last_total_count:
- self.data['average_response_time_per_iteration_sec'] = (data['total_response_time_sec'] - self.last_total_response_time) * 1000000 / (data['total_count'] - self.last_total_count)
- else:
- self.data['average_response_time_per_iteration_sec'] = 0
- self.last_total_response_time = data['total_response_time_sec']
- self.last_total_count = data['total_count']
-
- return self.data or None
-
- def get_data_per_code_status(self, raw_data):
- data = defaultdict(int)
- for code, value in raw_data['total_status_code_count'].items():
- code_prefix = code[0]
- if code_prefix == '1' or code_prefix == '2' or code == '304':
- data['successful_requests'] += value
- elif code_prefix == '3':
- data['redirects'] += value
- elif code_prefix == '4':
- data['bad_requests'] += value
- elif code_prefix == '5':
- data['server_errors'] += value
- else:
- data['other_requests'] += value
- self.data.update(data)
-
- def get_data_per_code_family(self, raw_data):
- data = defaultdict(int)
- for code, value in raw_data['total_status_code_count'].items():
- code_prefix = code[0]
- if code_prefix == '1':
- data['1xx'] += value
- elif code_prefix == '2':
- data['2xx'] += value
- elif code_prefix == '3':
- data['3xx'] += value
- elif code_prefix == '4':
- data['4xx'] += value
- elif code_prefix == '5':
- data['5xx'] += value
- else:
- data['other'] += value
- self.data.update(data)
-
- def get_data_per_code(self, raw_data):
- for code, value in raw_data['total_status_code_count'].items():
- if self.charts:
- if code not in self.data:
- self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
- self.data[code] = value
-
-def fetch_data_(raw_data, metrics):
- data = dict()
-
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError:
- continue
- data['_'.join(metrics_list)] = value
-
- return data
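-# A minimal usage sketch (editor's illustration; the payload below is hypothetical,
-# not a real Traefik health response): fetch_data_() walks nested dicts by the
-# dotted metric names and silently skips metrics missing from raw_data.
-# >>> sample = {'total_count': 10, 'nested': {'value': 3}}
-# >>> fetch_data_(raw_data=sample, metrics=['total_count', 'nested.value', 'missing.key'])
-# {'total_count': 10, 'nested_value': 3}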
diff --git a/python.d/varnish.chart.py b/python.d/varnish.chart.py
deleted file mode 100644
index d8145c0b6..000000000
--- a/python.d/varnish.chart.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: varnish netdata python.d module
-# Author: l2isbad
-
-import re
-
-from bases.collection import find_binary
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-ORDER = ['session_connections', 'client_requests',
- 'all_time_hit_rate', 'current_poll_hit_rate', 'cached_objects_expired', 'cached_objects_nuked',
- 'threads_total', 'threads_statistics', 'threads_queue_len',
- 'backend_connections', 'backend_requests',
- 'esi_statistics',
- 'memory_usage',
- 'uptime']
-
-CHARTS = {
- 'session_connections': {
- 'options': [None, 'Connections Statistics', 'connections/s',
- 'client metrics', 'varnish.session_connection', 'line'],
- 'lines': [
- ['sess_conn', 'accepted', 'incremental'],
- ['sess_dropped', 'dropped', 'incremental']
- ]
- },
- 'client_requests': {
- 'options': [None, 'Client Requests', 'requests/s',
- 'client metrics', 'varnish.client_requests', 'line'],
- 'lines': [
- ['client_req', 'received', 'incremental']
- ]
- },
- 'all_time_hit_rate': {
- 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance',
- 'varnish.all_time_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-absolute-row'],
- ['cache_miss', 'miss', 'percentage-of-absolute-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
- },
- 'current_poll_hit_rate': {
- 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance',
- 'varnish.current_poll_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-incremental-row'],
- ['cache_miss', 'miss', 'percentage-of-incremental-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
- ]
- },
- 'cached_objects_expired': {
- 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
- 'varnish.cached_objects_expired', 'line'],
- 'lines': [
- ['n_expired', 'objects', 'incremental']
- ]
- },
- 'cached_objects_nuked': {
- 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
- 'varnish.cached_objects_nuked', 'line'],
- 'lines': [
- ['n_lru_nuked', 'objects', 'incremental']
- ]
- },
- 'threads_total': {
- 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
- 'varnish.threads_total', 'line'],
- 'lines': [
- ['threads', None, 'absolute']
- ]
- },
- 'threads_statistics': {
- 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
- 'varnish.threads_statistics', 'line'],
- 'lines': [
- ['threads_created', 'created', 'incremental'],
- ['threads_failed', 'failed', 'incremental'],
- ['threads_limited', 'limited', 'incremental']
- ]
- },
- 'threads_queue_len': {
- 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
- 'varnish.threads_queue_len', 'line'],
- 'lines': [
- ['thread_queue_len', 'in queue']
- ]
- },
- 'backend_connections': {
- 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
- 'varnish.backend_connections', 'line'],
- 'lines': [
- ['backend_conn', 'successful', 'incremental'],
- ['backend_unhealthy', 'unhealthy', 'incremental'],
- ['backend_reuse', 'reused', 'incremental'],
- ['backend_toolate', 'closed', 'incremental'],
- ['backend_recycle', 'recycled', 'incremental'],
- ['backend_fail', 'failed', 'incremental']
- ]
- },
- 'backend_requests': {
- 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
- 'varnish.backend_requests', 'line'],
- 'lines': [
- ['backend_req', 'sent', 'incremental']
- ]
- },
- 'esi_statistics': {
- 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
- 'lines': [
- ['esi_errors', 'errors', 'incremental'],
- ['esi_warnings', 'warnings', 'incremental']
- ]
- },
- 'memory_usage': {
- 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
- 'lines': [
- ['memory_free', 'free', 'absolute', 1, 1 << 20],
- ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
- },
- 'uptime': {
- 'lines': [
- ['uptime', None, 'absolute']
- ],
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
- }
-}
-
-
-class Parser:
- _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
- _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
- _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
-
- def __init__(self):
- self.re_default = None
- self.re_backend = None
-
- def init(self, data):
- data = ''.join(data)
- parsed_main = Parser._default.findall(data)
- if parsed_main:
- self.re_default = Parser._default
-
- parsed_backend = Parser._backend_new.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_new
- else:
- parsed_backend = Parser._backend_old.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_old
-
- def server_stats(self, data):
- return self.re_default.findall(''.join(data))
-
- def backend_stats(self, data):
- return self.re_backend.findall(''.join(data))
-
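-# Editor's illustration of how the Parser regexes are meant to be used; the two lines
-# below are hypothetical `varnishstat -1` output, not captured data:
-# >>> p = Parser()
-# >>> lines = ['MAIN.sess_conn 10 0.16 Sessions accepted\n',
-# ...          'VBE.boot.default(127.0.0.1,,8080).beresp_hdrbytes 500 . Header bytes\n']
-# >>> p.init(lines)
-# >>> p.server_stats(lines)[0]
-# ('MAIN.', 'sess_conn', '10')
-# >>> p.backend_stats(lines)[0]
-# ('boot.default', 'beresp_hdrbytes', '500')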
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- varnishstat = find_binary('varnishstat')
- self.command = [varnishstat, '-1'] if varnishstat else None
- self.parser = Parser()
-
- def check(self):
- if not self.command:
- self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata")
- return False
-
- # STDOUT is not empty
- reply = self._get_raw_data()
- if not reply:
- self.error("No output from 'varnishstat'. Not enough privileges?")
- return False
-
- self.parser.init(reply)
-
- # Output is parsable
- if not self.parser.re_default:
- self.error("Can't parse the output...")
- return False
-
- if self.parser.re_backend:
- backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
- self.create_backends_charts(backends)
- return True
-
- def get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- data = dict()
- server_stats = self.parser.server_stats(raw)
- if not server_stats:
- return None
-
- if self.parser.re_backend:
- backend_stats = self.parser.backend_stats(raw)
- data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
-
- data.update(dict((param, value) for _, param, value in server_stats))
-
- data['memory_allocated'] = data['s0.g_bytes']
- data['memory_free'] = data['s0.g_space']
-
- return data
-
- def create_backends_charts(self, backends):
- for backend in backends:
- chart_name = ''.join([backend, '_response_statistics'])
- title = 'Backend "{0}"'.format(backend.capitalize())
- hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
- body_bytes = ''.join([backend, '_beresp_bodybytes'])
-
- chart = {
- chart_name:
- {
- 'options': [None, title, 'kilobits/s', 'backend response statistics',
- 'varnish.backend', 'area'],
- 'lines': [
- [hdr_bytes, 'header', 'incremental', 8, 1000],
- [body_bytes, 'body', 'incremental', -8, 1000]
- ]
- }
- }
-
- self.order.insert(0, chart_name)
- self.definitions.update(chart)
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
deleted file mode 100644
index be9baba92..000000000
--- a/python.d/web_log.chart.py
+++ /dev/null
@@ -1,1047 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: web log netdata python.d module
-# Author: l2isbad
-
-import bisect
-import re
-import os
-import sys
-
-from collections import namedtuple, defaultdict
-from copy import deepcopy
-
-try:
- from itertools import filterfalse
-except ImportError:
- from itertools import ifilter as filter
- from itertools import ifilterfalse as filterfalse
-
-from bases.collection import read_last_line
-from bases.FrameworkServices.LogService import LogService
-
-
-ORDER_APACHE_CACHE = ['apache_cache']
-
-ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth',
- 'response_time', 'response_time_hist', 'response_time_upstream', 'response_time_upstream_hist',
- 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
- 'requests_per_ipproto', 'clients', 'clients_all']
-
-ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes',
- 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods',
- 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types',
- 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all']
-
-CHARTS_WEB = {
- 'response_codes': {
- 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['0xx', 'other', 'incremental'],
- ['unmatched', None, 'incremental']
- ]},
- 'bandwidth': {
- 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
- 'lines': [
- ['resp_length', 'received', 'incremental', 8, 1000],
- ['bytes_sent', 'sent', 'incremental', -8, 1000]
- ]},
- 'response_time': {
- 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
- 'lines': [
- ['resp_time_min', 'min', 'incremental', 1, 1000],
- ['resp_time_max', 'max', 'incremental', 1, 1000],
- ['resp_time_avg', 'avg', 'incremental', 1, 1000]
- ]},
- 'response_time_hist': {
- 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
- 'lines': [
- ]},
- 'response_time_upstream': {
- 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
- 'web_log.response_time_upstream', 'area'],
- 'lines': [
- ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
- ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
- ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
- ]},
- 'response_time_upstream_hist': {
- 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
- 'web_log.response_time_upstream_hist', 'line'],
- 'lines': [
- ]},
- 'clients': {
- 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
- 'lines': [
- ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
- ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
- 'clients_all': {
- 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
- 'lines': [
- ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
- ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
- ]},
- 'http_method': {
- 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
- 'lines': [
- ['GET', 'GET', 'incremental', 1, 1]
- ]},
- 'http_version': {
- 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
- 'web_log.http_version', 'stacked'],
- 'lines': []},
- 'requests_per_ipproto': {
- 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
- 'stacked'],
- 'lines': [
- ['req_ipv4', 'ipv4', 'incremental', 1, 1],
- ['req_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
- 'response_statuses': {
- 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses',
- 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental', 1, 1],
- ['server_errors', 'error', 'incremental', 1, 1],
- ['redirects', 'redirect', 'incremental', 1, 1],
- ['bad_requests', 'bad', 'incremental', 1, 1],
- ['other_requests', 'other', 'incremental', 1, 1]
- ]},
- 'requests_per_url': {
- 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url',
- 'stacked'],
- 'lines': [
- ['url_pattern_other', 'other', 'incremental', 1, 1]
- ]},
- 'requests_per_user_defined': {
- 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
- 'web_log.requests_per_user_defined', 'stacked'],
- 'lines': [
- ['user_pattern_other', 'other', 'incremental', 1, 1]
- ]}
-}
-
-CHARTS_APACHE_CACHE = {
- 'apache_cache': {
- 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
- 'stacked'],
- 'lines': [
- ["hit", 'cache', "percentage-of-absolute-row"],
- ["miss", None, "percentage-of-absolute-row"],
- ["other", None, "percentage-of-absolute-row"]
- ]}
-}
-
-CHARTS_SQUID = {
- 'squid_duration': {
- 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
- 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
- 'lines': [
- ['duration_min', 'min', 'incremental', 1, 1000],
- ['duration_max', 'max', 'incremental', 1, 1000],
- ['duration_avg', 'avg', 'incremental', 1, 1000]
- ]},
- 'squid_bytes': {
- 'options': [None, 'Amount Of Data Delivered To The Clients',
- 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
- 'lines': [
- ['bytes', 'sent', 'incremental', 8, 1000]
- ]},
- 'squid_response_statuses': {
- 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
- 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental', 1, 1],
- ['server_errors', 'error', 'incremental', 1, 1],
- ['redirects', 'redirect', 'incremental', 1, 1],
- ['bad_requests', 'bad', 'incremental', 1, 1],
- ['other_requests', 'other', 'incremental', 1, 1]
- ]},
- 'squid_response_codes': {
- 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
- 'web_log.squid_response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['0xx', None, 'incremental'],
- ['other', None, 'incremental'],
- ['unmatched', None, 'incremental']
- ]},
- 'squid_code': {
- 'options': [None, 'Responses Per Cache Result Of The Request',
- 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
- 'lines': [
- ]},
- 'squid_detailed_response_codes': {
- 'options': [None, 'Detailed Response Codes',
- 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
- 'squid_hier_code': {
- 'options': [None, 'Responses Per Hierarchy Code',
- 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
- 'lines': [
- ]},
- 'squid_method': {
- 'options': [None, 'Requests Per Method',
- 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
- 'lines': [
- ]},
- 'squid_mime_type': {
- 'options': [None, 'Requests Per MIME Type',
- 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
- 'lines': [
- ]},
- 'squid_clients': {
- 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
- 'web_log.squid_clients', 'stacked'],
- 'lines': [
- ['unique_ipv4', 'ipv4', 'incremental'],
- ['unique_ipv6', 'ipv6', 'incremental']
- ]},
- 'squid_clients_all': {
- 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
- 'web_log.squid_clients_all', 'stacked'],
- 'lines': [
- ['unique_tot_ipv4', 'ipv4', 'absolute'],
- ['unique_tot_ipv6', 'ipv6', 'absolute']
- ]},
- 'squid_transport_methods': {
- 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
- 'web_log.squid_transport_methods', 'stacked'],
- 'lines': [
- ]},
- 'squid_transport_errors': {
- 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
- 'web_log.squid_transport_errors', 'stacked'],
- 'lines': [
- ]},
- 'squid_handling_opts': {
- 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
- 'web_log.squid_handling_opts', 'stacked'],
- 'lines': [
- ]},
- 'squid_object_types': {
- 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
- 'web_log.squid_object_types', 'stacked'],
- 'lines': [
- ]},
- 'squid_cache_events': {
- 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
- 'web_log.squid_cache_events', 'stacked'],
- 'lines': [
- ]}
-}
-
-NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
-
-DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
-
-SQUID_CODES = dict(TCP='squid_transport_methods', UDP='squid_transport_methods', NONE='squid_transport_methods',
- CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts',
- SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts',
- REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types',
- OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types',
- MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types',
- HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events',
- DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events',
- ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors')
-
-REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:\.\d)?)')
-
-
-class Service(LogService):
- def __init__(self, configuration=None, name=None):
- """
- :param configuration:
- :param name:
- """
- LogService.__init__(self, configuration=configuration, name=name)
- self.configuration = configuration
- self.log_path = self.configuration.get('path')
- self.job = None
-
- def check(self):
- """
- :return: bool
-
- 1. "log_path" is specified in the module configuration file
- 2. "log_path" must be readable by netdata user and must exist
- 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
- 4. other checks depends on log "type"
- """
-
- log_type = self.configuration.get('type', 'web')
- log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
-
- if log_type not in log_types:
- self.error("bad log type {log_type}. Supported types: {types}".format(log_type=log_type,
- types=log_types.keys()))
- return False
-
- if not self.log_path:
- self.error('log path is not specified')
- return False
-
- if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
- self.error('{log_file} is not readable or does not exist'.format(log_file=self.log_path))
- return False
-
- if not os.path.getsize(self.log_path):
- self.error('{log_file} is empty'.format(log_file=self.log_path))
- return False
-
- self.job = log_types[log_type](self)
- if self.job.check():
- self.order = self.job.order
- self.definitions = self.job.definitions
- return True
- return False
-
- def _get_data(self):
- return self.job.get_data(self._get_raw_data())
-
-
-class Web:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_WEB[:]
- self.definitions = deepcopy(CHARTS_WEB)
- self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
- self.storage = dict()
- self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
- 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0,
- 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
- '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
- 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
-
- def __getattr__(self, item):
- return getattr(self.service, item)
-
- def check(self):
- last_line = read_last_line(self.log_path)
- if not last_line:
- return False
- # Custom_log_format or predefined log format.
- if self.configuration.get('custom_log_format'):
- match_dict, error = self.find_regex_custom(last_line)
- else:
- match_dict, error = self.find_regex(last_line)
-
- # "match_dict" is None if there are any problems
- if match_dict is None:
- self.error(error)
- return False
-
- self.storage['unique_all_time'] = list()
- self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
- self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
-
- self.create_web_charts(match_dict) # Create charts
- self.info('Collected data: %s' % list(match_dict.keys()))
- return True
-
- def create_web_charts(self, match_dict):
- """
- :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
- :return:
- Create/remove additional charts depending on the 'match_dict' keys and configuration file options
- """
- if 'resp_time' not in match_dict:
- self.order.remove('response_time')
- self.order.remove('response_time_hist')
- if 'resp_time_upstream' not in match_dict:
- self.order.remove('response_time_upstream')
- self.order.remove('response_time_upstream_hist')
-
- # Add 'response_time_hist' and 'response_time_upstream_hist' charts if a histogram is specified in the configuration
- histogram = self.configuration.get('histogram', None)
- if isinstance(histogram, list):
- self.storage['bucket_index'] = histogram[:]
- self.storage['bucket_index'].append(sys.maxsize)
- self.storage['buckets'] = [0] * (len(histogram) + 1)
- self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
- hist_lines = self.definitions['response_time_hist']['lines']
- upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
- for i, le in enumerate(histogram):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
- hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
- upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
-
- hist_lines.append(["response_time_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
- upstream_hist_lines.append(["response_time_upstream_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
- elif histogram is not None:
- self.error("expect histogram list, but was {0}".format(type(histogram)))
-
- if not self.configuration.get('all_time', True):
- self.order.remove('clients_all')
-
- # Add 'detailed_response_codes' chart if specified in the configuration
- if self.configuration.get('detailed_response_codes', True):
- if self.configuration.get('detailed_response_aggregate', True):
- codes = DET_RESP_AGGR[:1]
- else:
- codes = DET_RESP_AGGR[1:]
-
- for code in codes:
- self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] \
- = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
- 'web_log.detailed_response_codes%s' % code, 'stacked'],
- 'lines': []}
-
- # Add 'requests_per_url' chart if specified in the configuration
- if self.storage['url_pattern']:
- for elem in self.storage['url_pattern']:
- dim = [elem.description, elem.description[12:], 'incremental']
- self.definitions['requests_per_url']['lines'].append(dim)
- self.data[elem.description] = 0
- self.data['url_pattern_other'] = 0
- else:
- self.order.remove('requests_per_url')
-
- # Add 'requests_per_user_defined' chart if specified in the configuration
- if self.storage['user_pattern'] and 'user_defined' in match_dict:
- for elem in self.storage['user_pattern']:
- dim = [elem.description, elem.description[13:], 'incremental']
- self.definitions['requests_per_user_defined']['lines'].append(dim)
- self.data[elem.description] = 0
- self.data['user_pattern_other'] = 0
- else:
- self.order.remove('requests_per_user_defined')
-
- def get_data(self, raw_data=None):
- """
- Parses new log lines
- :return: dict OR None
- None if _get_raw_data method fails.
- In all other cases - dict.
- """
- if not raw_data:
- return None if raw_data is None else self.data
-
- filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
-
- unique_current = set()
- timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
-
- for line in filtered_data:
- match = self.storage['regex'].search(line)
- if match:
- match_dict = match.groupdict()
- try:
- code = match_dict['code'][0] + 'xx'
- self.data[code] += 1
- except KeyError:
- self.data['0xx'] += 1
- # detailed response code
- if self.configuration.get('detailed_response_codes', True):
- self.get_data_per_response_codes_detailed(code=match_dict['code'])
- # response statuses
- self.get_data_per_statuses(code=match_dict['code'])
- # requests per user defined pattern
- if self.storage['user_pattern'] and 'user_defined' in match_dict:
- self.get_data_per_pattern(row=match_dict['user_defined'],
- other='user_pattern_other',
- pattern=self.storage['user_pattern'])
- # method, url, http version
- self.get_data_from_request_field(match_dict=match_dict)
- # bandwidth sent
- bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
- self.data['bytes_sent'] += int(bytes_sent)
- # request processing time and bandwidth received
- if 'resp_length' in match_dict:
- resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
- self.data['resp_length'] += int(resp_length)
- if 'resp_time' in match_dict:
- resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
- get_timings(timings=timings['resp_time'], time=resp_time)
- if 'bucket_index' in self.storage:
- get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
- if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
- resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
- get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
- if 'bucket_index' in self.storage:
- get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
- # requests per ip proto
- proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
- self.data['req_' + proto] += 1
- # unique clients ips
- if self.configuration.get('all_time', True):
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match_dict['address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
- if match_dict['address'] not in unique_current:
- self.data['unique_cur_' + proto] += 1
- unique_current.add(match_dict['address'])
- else:
- self.data['unmatched'] += 1
-
- # timings
- for elem in timings:
- self.data[elem + '_min'] += timings[elem]['minimum']
- self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
- self.data[elem + '_max'] += timings[elem]['maximum']
-
- # histogram
- if 'bucket_index' in self.storage:
- buckets = self.storage['buckets']
- upstream_buckets = self.storage['upstream_buckets']
- for i in range(0, len(self.storage['bucket_index'])):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
- self.data[hist_key] = buckets[i]
- self.data[upstream_hist_key] = upstream_buckets[i]
-
- return self.data
-
- def find_regex(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
- We need to find an appropriate pattern for the current log file.
- All the logic does is run a regex search on the string for each predefined pattern
- until one of them matches or we fail.
- """
- # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
- # 5. Bytes sent 6. Response length 7. Response process time
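- # Editor's illustration - a hypothetical access log line matched by the `default` pattern:
- #   127.0.0.1 - - [01/Jan/2018:00:00:01 +0000] "GET /index.html HTTP/1.1" 200 1234
- #   -> address='127.0.0.1', request='GET /index.html HTTP/1.1', code='200', bytes_sent='1234'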
- default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)')
-
- apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' (?P<resp_length>\d+|-)'
- r' (?P<resp_time>\d+) ')
-
- apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' .*?'
- r' (?P<resp_length>\d+|-)'
- r' (?P<resp_time>\d+)'
- r'(?: |$)')
-
- nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+) ')
-
- nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)'
- r' (?P<resp_time_upstream>[\d.-]+) ')
-
- nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)')
-
- def func_usec(time):
- return time
-
- def func_sec(time):
- return time * 1000000
-
- r_regex = [apache_ext_insert, apache_ext_append,
- nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
- default]
- r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
- regex_function = zip(r_regex, r_function)
-
- match_dict = dict()
- for regex, func in regex_function:
- match = regex.search(last_line)
- if match:
- self.storage['regex'] = regex
- self.storage['func_resp_time'] = func
- match_dict = match.groupdict()
- break
-
- return find_regex_return(match_dict=match_dict or None,
- msg='Unknown log format. You need to use "custom_log_format" feature.')
-
- def find_regex_custom(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
-
- We are here only if "custom_log_format" is in logs. We need to make sure:
- 1. "custom_log_format" is a dict
- 2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance
-
- If all parameters are ok we need to make sure:
- 1. The pattern search succeeds
- 2. The pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
-
- If the pattern search succeeds we need to make sure:
- 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
-
- If this is True we need to make sure:
- 1. All mandatory key values from "match_dict" have the correct format
- ("code" is integer, "method" is uppercase word, etc)
-
- If non-mandatory keys are in "match_dict" we need to make sure:
- 1. All non-mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
- ("resp_length" is integer or "-", "resp_time" is integer or float)
-
- """
- if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
- return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
-
- pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
- if not (pattern and isinstance(pattern, str)):
- return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
-
- resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
-
- if not isinstance(resp_time_func, int):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
-
- try:
- regex = re.compile(pattern)
- except re.error as error:
- return find_regex_return(msg='Pattern compile error: %s' % str(error))
-
- match = regex.search(last_line)
- if not match:
- return find_regex_return(msg='Custom log: pattern search FAILED')
-
- match_dict = match.groupdict() or None
- if match_dict is None:
- return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
- ' (you need to use ?P<subgroup_name>)')
- mandatory_dict = {'address': r'[\w.:-]+',
- 'code': r'[1-9]\d{2}',
- 'bytes_sent': r'\d+|-'}
- optional_dict = {'resp_length': r'\d+|-',
- 'resp_time': r'[\d.]+',
- 'resp_time_upstream': r'[\d.-]+',
- 'method': r'[A-Z]+',
- 'http_version': r'\d(?:\.\d)?'}
-
- mandatory_values = set(mandatory_dict) - set(match_dict)
- if mandatory_values:
- return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
- % list(mandatory_values))
- for key in mandatory_dict:
- if not re.search(mandatory_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- optional_values = set(optional_dict) & set(match_dict)
- for key in optional_values:
- if not re.search(optional_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- dot_in_time = '.' in match_dict.get('resp_time', '')
- if dot_in_time:
- self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
- else:
- self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
-
- self.storage['regex'] = regex
- return find_regex_return(match_dict=match_dict)
-
- def get_data_from_request_field(self, match_dict):
- if match_dict.get('request'):
- match_dict = REQUEST_REGEX.search(match_dict['request'])
- if match_dict:
- match_dict = match_dict.groupdict()
- else:
- return
- # requests per url
- if match_dict.get('url') and self.storage['url_pattern']:
- self.get_data_per_pattern(row=match_dict['url'],
- other='url_pattern_other',
- pattern=self.storage['url_pattern'])
- # requests per http method
- if match_dict.get('method'):
- if match_dict['method'] not in self.data:
- self.charts['http_method'].add_dimension([match_dict['method'],
- match_dict['method'],
- 'incremental'])
- self.data[match_dict['method']] = 0
- self.data[match_dict['method']] += 1
- # requests per http version
- if match_dict.get('http_version'):
- dim_id = match_dict['http_version'].replace('.', '_')
- if dim_id not in self.data:
- self.charts['http_version'].add_dimension([dim_id,
- match_dict['http_version'],
- 'incremental'])
- self.data[dim_id] = 0
- self.data[dim_id] += 1
-
- def get_data_per_response_codes_detailed(self, code):
- """
- :param code: str: CODE from parsed line. Ex.: '202', '499'
- :return:
- Calls the add_dimension method if the value is seen for the first time
- """
- if code not in self.data:
- if self.configuration.get('detailed_response_aggregate', True):
- self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- else:
- code_index = int(code[0]) if int(code[0]) < 6 else 6
- chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
- self.charts[chart_key].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- self.data[code] += 1
-
- def get_data_per_pattern(self, row, other, pattern):
- """
- :param row: str:
- :param other: str:
- :param pattern: list of named tuples: (description, func)
- :return:
- Scans the row against the user defined patterns and increments the counter of the first
- pattern that matches; if none of them match, the "other" counter is incremented
- """
- match = None
- for elem in pattern:
- if elem.func(row):
- self.data[elem.description] += 1
- match = True
- break
- if not match:
- self.data[other] += 1
-
- def get_data_per_statuses(self, code):
- """
- :param code: str: response status code. Ex.: '202', '499'
- :return:
- """
- code_class = code[0]
- if code_class == '2' or code == '304' or code_class == '1':
- self.data['successful_requests'] += 1
- elif code_class == '3':
- self.data['redirects'] += 1
- elif code_class == '4':
- self.data['bad_requests'] += 1
- elif code_class == '5':
- self.data['server_errors'] += 1
- else:
- self.data['other_requests'] += 1
-
-
-class ApacheCache:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_APACHE_CACHE
- self.definitions = CHARTS_APACHE_CACHE
-
- @staticmethod
- def check():
- return True
-
- @staticmethod
- def get_data(raw_data=None):
- data = dict(hit=0, miss=0, other=0)
- if not raw_data:
- return None if raw_data is None else data
-
- for line in raw_data:
- if 'cache hit' in line:
- data['hit'] += 1
- elif 'cache miss' in line:
- data['miss'] += 1
- else:
- data['other'] += 1
- return data
-
-
-class Squid:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_SQUID
- self.definitions = CHARTS_SQUID
- self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
- self.storage = dict()
- self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0,
- '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0,
- 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0,
- 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0
- }
-
- def __getattr__(self, item):
- return getattr(self.service, item)
-
- def check(self):
- last_line = read_last_line(self.log_path)
- if not last_line:
- return False
- self.storage['unique_all_time'] = list()
- self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
- r' (?P<client_address>[\da-f.:]+)'
- r' (?P<squid_code>[A-Z_]+)/'
- r'(?P<http_code>[0-9]+)'
- r' (?P<bytes>[0-9]+)'
- r' (?P<method>[A-Z_]+)'
- r' (?P<url>[^ ]+)'
- r' (?P<user>[^ ]+)'
- r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
- r' (?P<mime_type>[^\n]+)')
-
- match = self.storage['regex'].search(last_line)
- if not match:
- self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
- return False
- self.storage['dynamic'] = {
- 'http_code':
- {'chart': 'squid_detailed_response_codes',
- 'func_dim_id': None,
- 'func_dim': None},
- 'hier_code': {
- 'chart': 'squid_hier_code',
- 'func_dim_id': None,
- 'func_dim': lambda v: v.replace('HIER_', '')},
- 'method': {
- 'chart': 'squid_method',
- 'func_dim_id': None,
- 'func_dim': None},
- 'mime_type': {
- 'chart': 'squid_mime_type',
- 'func_dim_id': lambda v: v.split('/')[0],
- 'func_dim': None}}
- if not self.configuration.get('all_time', True):
- self.order.remove('squid_clients_all')
- return True
-
- def get_data(self, raw_data=None):
- if not raw_data:
- return None if raw_data is None else self.data
-
- filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
-
- unique_ip = set()
- timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
-
- for row in filtered_data:
- match = self.storage['regex'].search(row)
- if match:
- match = match.groupdict()
- if match['duration'] != '0':
- get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
- try:
- self.data[match['http_code'][0] + 'xx'] += 1
- except KeyError:
- self.data['other'] += 1
-
- self.get_data_per_statuses(match['http_code'])
-
- self.get_data_per_squid_code(match['squid_code'])
-
- self.data['bytes'] += int(match['bytes'])
-
- proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
- # unique clients ips
- if self.configuration.get('all_time', True):
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match['client_address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
-
- if match['client_address'] not in unique_ip:
- self.data['unique_' + proto] += 1
- unique_ip.add(match['client_address'])
-
- for key, values in self.storage['dynamic'].items():
- if match[key] == '-':
- continue
- dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
- if dimension_id not in self.data:
- dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
- self.charts[values['chart']].add_dimension([dimension_id,
- dimension,
- 'incremental'])
- self.data[dimension_id] = 0
- self.data[dimension_id] += 1
- else:
- self.data['unmatched'] += 1
-
- for elem in timings:
- self.data[elem + '_min'] += timings[elem]['minimum']
- self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
- self.data[elem + '_max'] += timings[elem]['maximum']
- return self.data
-
- def get_data_per_statuses(self, code):
- """
- :param code: str: response status code. Ex.: '202', '499'
- :return:
- """
- code_class = code[0]
- if code_class == '2' or code == '304' or code_class == '1' or code == '000':
- self.data['successful_requests'] += 1
- elif code_class == '3':
- self.data['redirects'] += 1
- elif code_class == '4':
- self.data['bad_requests'] += 1
- elif code_class == '5' or code_class == '6':
- self.data['server_errors'] += 1
- else:
- self.data['other_requests'] += 1
-
- def get_data_per_squid_code(self, code):
- """
- :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
- :return:
- """
- if code not in self.data:
- self.charts['squid_code'].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- self.data[code] += 1
-
- for tag in code.split('_'):
- try:
- chart_key = SQUID_CODES[tag]
- except KeyError:
- continue
- dimension_id = '_'.join(['code_detailed', tag])
- if dimension_id not in self.data:
- self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
- self.data[dimension_id] = 0
- self.data[dimension_id] += 1
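- # Editor's illustration: a hypothetical squid_code of 'TCP_MEM_HIT' increments its own
- # dimension on the 'squid_code' chart, and via SQUID_CODES also code_detailed_TCP
- # (squid_transport_methods) plus code_detailed_MEM and code_detailed_HIT (squid_cache_events).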
-
-
-def get_timings(timings, time):
- """
- :param timings: dict with 'minimum', 'maximum', 'summary' and 'count' keys, updated in place
- :param time: response time value to add to the running statistics
- :return: None
- """
- if timings['minimum'] is None:
- timings['minimum'] = time
- if time > timings['maximum']:
- timings['maximum'] = time
- elif time < timings['minimum']:
- timings['minimum'] = time
- timings['summary'] += time
- timings['count'] += 1
-
-def get_hist(index, buckets, time):
- """
- :param index: histogram index (Ex. [10, 50, 100, 150, ...])
- :param buckets: histogram buckets
- :param time: time
- :return: None
- """
- for i in range(len(index)-1, -1, -1):
- if time <= index[i]:
- buckets[i] += 1
- else:
- break
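-# Editor's worked example (hypothetical values): with index = [10, 50, 100, sys.maxsize]
-# and time = 30, the loop walks backwards and increments buckets[3], buckets[2] and
-# buckets[1] (every bound >= 30), then breaks at index[0] = 10, so each bucket holds a
-# cumulative "requests at or under this bound" count.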
-
-def address_not_in_pool(pool, address, pool_size):
- """
- :param pool: list of ip addresses
- :param address: ip address
- :param pool_size: current pool size
- :return: True if address not in pool. False otherwise.
- """
- index = bisect.bisect_left(pool, address)
- if index < pool_size:
- if pool[index] == address:
- return False
- bisect.insort_left(pool, address)
- return True
- bisect.insort_left(pool, address)
- return True
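-# Editor's illustration (hypothetical addresses): the pool stays sorted, so bisect gives
-# cheap membership checks before inserting.
-# >>> pool = []
-# >>> address_not_in_pool(pool, '10.0.0.1', 0)
-# True
-# >>> address_not_in_pool(pool, '10.0.0.1', 1)
-# False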
-
-
-def find_regex_return(match_dict=None, msg='Generic error message'):
- """
- :param match_dict: dict: re.search.groupdict() or None
- :param msg: str: error description
- :return: tuple:
- """
- return match_dict, msg
-
-
-def check_patterns(string, dimension_regex_dict):
- """
- :param string: str:
- :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
- :return: list of named tuples or None:
- We need to make sure all patterns are valid regular expressions
- """
- if not hasattr(dimension_regex_dict, 'keys'):
- return None
-
- result = list()
-
- def valid_pattern(pattern):
- """
- :param pattern: str
- :return: re.compile(pattern) or False
- """
- if not isinstance(pattern, str):
- return False
- try:
- return re.compile(pattern)
- except re.error:
- return False
-
- def func_search(pattern):
- def closure(v):
- return pattern.search(v)
-
- return closure
-
- for dimension, regex in dimension_regex_dict.items():
- valid = valid_pattern(regex)
- if isinstance(dimension, str) and valid:
- func = func_search(valid)
- result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
- func=func))
- return result or None
-
-
-def filter_data(raw_data, pre_filter):
- """
- :param raw_data: list: new log lines
- :param pre_filter: list of named tuples from check_patterns('filter', ...) or None
- :return: iterable: lines that pass the include/exclude filters (raw_data itself if no filter is set)
- """
-
- if not pre_filter:
- return raw_data
- filtered = raw_data
- for elem in pre_filter:
- if elem.description == 'filter_include':
- filtered = filter(elem.func, filtered)
- elif elem.description == 'filter_exclude':
- filtered = filterfalse(elem.func, filtered)
- return filtered
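-# Editor's usage sketch (hypothetical patterns): check_patterns() compiles a job's 'filter'
-# option into NAMED_PATTERN tuples whose descriptions ('filter_include' / 'filter_exclude')
-# tell filter_data() whether to keep or drop matching log lines.
-# >>> pre_filter = check_patterns('filter', {'include': r'GET', 'exclude': r'\.css'})
-# >>> lines = ['GET /index.html', 'GET /style.css', 'POST /api']
-# >>> list(filter_data(raw_data=lines, pre_filter=pre_filter))
-# ['GET /index.html']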