author    Lennart Weller <lhw@ring0.de>  2017-01-24 15:21:16 +0000
committer Lennart Weller <lhw@ring0.de>  2017-01-24 15:21:16 +0000
commit    ef0c127e7f95d2db2715b9e99fe758eebc7dabd3 (patch)
tree      ea5d62342aba06f376f3be63aab898503b56f3ec /python.d
parent    update watch file and files-exclude (diff)
parent    New upstream version 1.5.0+dfsg (diff)
download  netdata-ef0c127e7f95d2db2715b9e99fe758eebc7dabd3.tar.xz
          netdata-ef0c127e7f95d2db2715b9e99fe758eebc7dabd3.zip
Merge tag 'upstream/1.5.0+dfsg'
Upstream version 1.5.0+dfsg
Diffstat (limited to 'python.d')
-rw-r--r--  python.d/Makefile.am  72
-rw-r--r--  python.d/Makefile.in  103
-rw-r--r--  python.d/README.md  482
-rw-r--r--  python.d/bind_rndc.chart.py  180
-rw-r--r--  python.d/cpufreq.chart.py  83
-rw-r--r--  python.d/cpuidle.chart.py  143
-rw-r--r--  python.d/dovecot.chart.py  52
-rw-r--r--  python.d/elasticsearch.chart.py  402
-rw-r--r--  python.d/fail2ban.chart.py  91
-rw-r--r--  python.d/freeradius.chart.py  114
-rw-r--r--  python.d/gunicorn_log.chart.py  72
-rw-r--r--  python.d/haproxy.chart.py  177
-rw-r--r--  python.d/hddtemp.chart.py  35
-rw-r--r--  python.d/ipfs.chart.py  110
-rw-r--r--  python.d/isc_dhcpd.chart.py  167
-rw-r--r--  python.d/mdstat.chart.py  99
-rw-r--r--  python.d/memcached.chart.py  88
-rw-r--r--  python.d/mysql.chart.py  117
-rw-r--r--  python.d/nginx.chart.py  8
-rw-r--r--  python.d/nginx_log.chart.py  39
-rw-r--r--  python.d/ovpn_status_log.chart.py  77
-rwxr-xr-x  python.d/phpfpm.chart.py  28
-rw-r--r--  python.d/postgres.chart.py  386
-rw-r--r--  python.d/python_modules/base.py  297
-rw-r--r--  python.d/python_modules/msg.py  63
-rw-r--r--  python.d/python_modules/pyyaml2/__init__.py  315
-rw-r--r--  python.d/python_modules/pyyaml2/composer.py  139
-rw-r--r--  python.d/python_modules/pyyaml2/constructor.py  675
-rw-r--r--  python.d/python_modules/pyyaml2/cyaml.py  85
-rw-r--r--  python.d/python_modules/pyyaml2/dumper.py  62
-rw-r--r--  python.d/python_modules/pyyaml2/emitter.py  1140
-rw-r--r--  python.d/python_modules/pyyaml2/error.py  75
-rw-r--r--  python.d/python_modules/pyyaml2/events.py  86
-rw-r--r--  python.d/python_modules/pyyaml2/loader.py  40
-rw-r--r--  python.d/python_modules/pyyaml2/nodes.py  49
-rw-r--r--  python.d/python_modules/pyyaml2/parser.py  589
-rw-r--r--  python.d/python_modules/pyyaml2/reader.py  190
-rw-r--r--  python.d/python_modules/pyyaml2/representer.py  484
-rw-r--r--  python.d/python_modules/pyyaml2/resolver.py  224
-rw-r--r--  python.d/python_modules/pyyaml2/scanner.py  1457
-rw-r--r--  python.d/python_modules/pyyaml2/serializer.py  111
-rw-r--r--  python.d/python_modules/pyyaml2/tokens.py  104
-rw-r--r--  python.d/python_modules/pyyaml3/__init__.py  312
-rw-r--r--  python.d/python_modules/pyyaml3/composer.py  139
-rw-r--r--  python.d/python_modules/pyyaml3/constructor.py  686
-rw-r--r--  python.d/python_modules/pyyaml3/cyaml.py  85
-rw-r--r--  python.d/python_modules/pyyaml3/dumper.py  62
-rw-r--r--  python.d/python_modules/pyyaml3/emitter.py  1137
-rw-r--r--  python.d/python_modules/pyyaml3/error.py  75
-rw-r--r--  python.d/python_modules/pyyaml3/events.py  86
-rw-r--r--  python.d/python_modules/pyyaml3/loader.py  40
-rw-r--r--  python.d/python_modules/pyyaml3/nodes.py  49
-rw-r--r--  python.d/python_modules/pyyaml3/parser.py  589
-rw-r--r--  python.d/python_modules/pyyaml3/reader.py  192
-rw-r--r--  python.d/python_modules/pyyaml3/representer.py  374
-rw-r--r--  python.d/python_modules/pyyaml3/resolver.py  224
-rw-r--r--  python.d/python_modules/pyyaml3/scanner.py  1448
-rw-r--r--  python.d/python_modules/pyyaml3/serializer.py  111
-rw-r--r--  python.d/python_modules/pyyaml3/tokens.py  104
-rw-r--r--  python.d/redis.chart.py  110
-rw-r--r--  python.d/sensors.chart.py  20
-rw-r--r--  python.d/squid.chart.py  19
-rw-r--r--  python.d/varnish.chart.py  239
63 files changed, 3447 insertions(+), 11964 deletions(-)
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
index d769e313..883f06c4 100644
--- a/python.d/Makefile.am
+++ b/python.d/Makefile.am
@@ -1,46 +1,58 @@
MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
CLEANFILES = \
- python-modules-installer.sh \
- $(NULL)
+ python-modules-installer.sh \
+ $(NULL)
include $(top_srcdir)/build/subst.inc
SUFFIXES = .in
dist_python_SCRIPTS = \
- apache.chart.py \
- apache_cache.chart.py \
- cpufreq.chart.py \
- dovecot.chart.py \
- example.chart.py \
- exim.chart.py \
- hddtemp.chart.py \
- ipfs.chart.py \
- memcached.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_log.chart.py \
- phpfpm.chart.py \
- postfix.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- sensors.chart.py \
- squid.chart.py \
- tomcat.chart.py \
- python-modules-installer.sh \
- $(NULL)
+ apache.chart.py \
+ apache_cache.chart.py \
+ bind_rndc.chart.py \
+ cpufreq.chart.py \
+ cpuidle.chart.py \
+ dovecot.chart.py \
+ elasticsearch.chart.py \
+ example.chart.py \
+ exim.chart.py \
+ fail2ban.chart.py \
+ freeradius.chart.py \
+ gunicorn_log.chart.py \
+ haproxy.chart.py \
+ hddtemp.chart.py \
+ ipfs.chart.py \
+ isc_dhcpd.chart.py \
+ mdstat.chart.py \
+ memcached.chart.py \
+ mysql.chart.py \
+ nginx.chart.py \
+ nginx_log.chart.py \
+ ovpn_status_log.chart.py \
+ phpfpm.chart.py \
+ postfix.chart.py \
+ postgres.chart.py \
+ redis.chart.py \
+ retroshare.chart.py \
+ sensors.chart.py \
+ squid.chart.py \
+ tomcat.chart.py \
+ varnish.chart.py \
+ python-modules-installer.sh \
+ $(NULL)
dist_python_DATA = \
- README.md \
- $(NULL)
+ README.md \
+ $(NULL)
pythonmodulesdir=$(pythondir)/python_modules
dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- python_modules/base.py \
- python_modules/msg.py \
- python_modules/lm_sensors.py \
- $(NULL)
+ python_modules/__init__.py \
+ python_modules/base.py \
+ python_modules/msg.py \
+ python_modules/lm_sensors.py \
+ $(NULL)
pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
dist_pythonyaml2_DATA = \
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
index 426f43ed..018e8b3e 100644
--- a/python.d/Makefile.in
+++ b/python.d/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# Makefile.in generated by automake 1.15 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+# Copyright (C) 1994-2014 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -16,7 +16,17 @@
VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
@@ -79,10 +89,6 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
- $(dist_python_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
subdir = python.d
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
@@ -90,10 +96,15 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c_mallinfo.m4 \
$(top_srcdir)/m4/ax_c_mallopt.m4 \
$(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
$(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
$(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
+ $(dist_python_DATA) $(dist_pythonmodules_DATA) \
+ $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
+ $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
@@ -151,6 +162,7 @@ am__can_run_installinfo = \
DATA = $(dist_python_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
@@ -290,44 +302,56 @@ varlibdir = @varlibdir@
webdir = @webdir@
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
- python-modules-installer.sh \
- $(NULL)
+ python-modules-installer.sh \
+ $(NULL)
SUFFIXES = .in
dist_python_SCRIPTS = \
- apache.chart.py \
- apache_cache.chart.py \
- cpufreq.chart.py \
- dovecot.chart.py \
- example.chart.py \
- exim.chart.py \
- hddtemp.chart.py \
- ipfs.chart.py \
- memcached.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_log.chart.py \
- phpfpm.chart.py \
- postfix.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- sensors.chart.py \
- squid.chart.py \
- tomcat.chart.py \
- python-modules-installer.sh \
- $(NULL)
+ apache.chart.py \
+ apache_cache.chart.py \
+ bind_rndc.chart.py \
+ cpufreq.chart.py \
+ cpuidle.chart.py \
+ dovecot.chart.py \
+ elasticsearch.chart.py \
+ example.chart.py \
+ exim.chart.py \
+ fail2ban.chart.py \
+ freeradius.chart.py \
+ gunicorn_log.chart.py \
+ haproxy.chart.py \
+ hddtemp.chart.py \
+ ipfs.chart.py \
+ isc_dhcpd.chart.py \
+ mdstat.chart.py \
+ memcached.chart.py \
+ mysql.chart.py \
+ nginx.chart.py \
+ nginx_log.chart.py \
+ ovpn_status_log.chart.py \
+ phpfpm.chart.py \
+ postfix.chart.py \
+ postgres.chart.py \
+ redis.chart.py \
+ retroshare.chart.py \
+ sensors.chart.py \
+ squid.chart.py \
+ tomcat.chart.py \
+ varnish.chart.py \
+ python-modules-installer.sh \
+ $(NULL)
dist_python_DATA = \
- README.md \
- $(NULL)
+ README.md \
+ $(NULL)
pythonmodulesdir = $(pythondir)/python_modules
dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- python_modules/base.py \
- python_modules/msg.py \
- python_modules/lm_sensors.py \
- $(NULL)
+ python_modules/__init__.py \
+ python_modules/base.py \
+ python_modules/msg.py \
+ python_modules/lm_sensors.py \
+ $(NULL)
pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
dist_pythonyaml2_DATA = \
@@ -387,7 +411,6 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python.d/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --gnu python.d/Makefile
-.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
@@ -396,7 +419,7 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
-$(top_srcdir)/build/subst.inc:
+$(top_srcdir)/build/subst.inc $(am__empty):
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
@@ -689,6 +712,8 @@ uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
uninstall-dist_pythonSCRIPTS uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
+.PRECIOUS: Makefile
+
.in:
if sed \
-e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
diff --git a/python.d/README.md b/python.d/README.md
index a3c7b4fa..75f5614a 100644
--- a/python.d/README.md
+++ b/python.d/README.md
@@ -125,12 +125,82 @@ If no configuration is given, module will attempt to read log file at `/var/log/
---
+# bind_rndc
+
+The module parses the bind dump file to collect real-time performance metrics.
+
+**Requirements:**
+ * Version of bind must be 9.6+
+ * Netdata must have permissions to run `rndc status`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RRSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3. **Outgoing queries**
+ * Same as Incoming queries
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`.
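+
+For illustration, the collection cycle can be sketched as follows (a minimal
+sketch, not the module itself; it assumes `rndc` is on the PATH and the stats
+file is readable by the calling user):
+
+```python
+import subprocess
+from os.path import getsize
+
+STATS = '/var/log/bind/named.stats'
+
+def read_new_dump(stats_path=STATS):
+    size_before = getsize(stats_path)          # bind appends, so remember the offset
+    subprocess.check_call(['rndc', 'stats'])   # ask bind to append a fresh dump
+    with open(stats_path) as stats_file:
+        stats_file.seek(size_before)
+        return stats_file.read()               # only the newly appended dump
+
+print(read_new_dump())
+```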
+
+---
+
# cpufreq
-Module shows current cpu frequency by looking at appropriate files in /sys/devices
+This module shows the current CPU frequency as set by the cpufreq kernel
+module.
**Requirement:**
-Processor which presents data scaling frequency data
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+This module tries to read from one of two possible locations. On
+initialization, it tries to read the `time_in_state` files provided by
+cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
+falls back to using the more inaccurate `scaling_cur_freq` file (which only
+represents the **current** CPU frequency, and doesn't account for any state
+changes which happen between updates).
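+
+For illustration, the `time_in_state` accumulation looks like this (a minimal
+sketch with a hypothetical `cpu0` path; each line of the file is
+"<frequency_khz> <time_in_10ms_units>", and the module diffs this weighted sum
+between updates):
+
+```python
+def weighted_freq_counter(path='/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state'):
+    total = 0
+    for line in open(path):
+        freq_khz, ticks = map(int, line.split())
+        total += freq_khz * ticks / 100   # same accumulation the module keeps per core
+    return total
+```
+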
It produces one chart with multiple lines (one line per core).
@@ -142,11 +212,23 @@ Sample:
sys_dir: "/sys/devices"
```
-If no configuration is given, module will search for `scaling_cur_freq` files in `/sys/devices` directory.
+If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified.
---
+# cpuidle
+
+This module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
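+
+For illustration, the per-state residency counters live in sysfs (a minimal
+sketch for a hypothetical `cpu0`; each state directory exposes a `name` and a
+cumulative `time` in microseconds):
+
+```python
+import glob
+
+for name_path in sorted(glob.glob('/sys/devices/system/cpu/cpu0/cpuidle/state*/name')):
+    state_dir = name_path.rsplit('/', 1)[0]
+    state_name = open(name_path).read().strip()
+    residency_usec = int(open(state_dir + '/time').read())
+    print(state_name, residency_usec)
+```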
+
+---
+
# dovecot
This module provides statistics information from dovecot server.
@@ -158,45 +240,47 @@ Dovecot unix socket with R/W permissions for user netdata or dovecot with config
Module gives information with following charts:
-1. **logins and sessions**
- * logins
+1. **sessions**
* active sessions
-2. **commands** - number of IMAP commands
+2. **logins**
+ * logins
+
+3. **commands** - number of IMAP commands
* commands
-3. **Faults**
+4. **Faults**
* minor
* major
-4. **Context Switches**
+5. **Context Switches**
* volountary
* involountary
-5. **disk** in bytes/s
+6. **disk** in bytes/s
* read
* write
-6. **bytes** in bytes/s
+7. **bytes** in bytes/s
* read
* write
-7. **number of syscalls** in syscalls/s
+8. **number of syscalls** in syscalls/s
* read
* write
-8. **lookups** - number of lookups per second
+9. **lookups** - number of lookups per second
* path
* attr
-9. **hits** - number of cache hits
+10. **hits** - number of cache hits
* hits
-10. **attempts** - authorization attemts
+11. **attempts** - authorization attempts
* success
* failure
-11. **cache** - cached authorization hits
+12. **cache** - cached authorization hits
* hit
* miss
@@ -219,6 +303,67 @@ If no configuration is given, module will attempt to connect to dovecot using un
---
+# elasticsearch
+
+The module monitors elasticsearch performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+ * Number of queries, fetches
+ * Time spent on queries, fetches
+ * Query and fetch latency
+
+2. **Indexing performance** charts:
+ * Number of documents indexed, index refreshes, flushes
+ * Time spent on indexing, refreshing, flushing
+ * Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+ * JVM heap currently in use, committed
+ * Count of garbage collections
+ * Time spent on garbage collections
+
+4. **Host metrics** charts:
+ * Available file descriptors in percent
+ * Opened HTTP connections
+ * Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+ * Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+ * Fielddata cache size
+ * Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+ * Cluster status
+ * Nodes and tasks statistics
+ * Shards statistics
+
+8. **Cluster stats API** charts:
+ * Nodes statistics
+ * Query cache statistics
+ * Docs statistics
+ * Store statistics
+ * Indices and shards statistics
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'ipaddress' # Server ip address or hostname
+ port : 'port' # Port on which elasticsearch listens
+ cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
+ cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
+```
+
+If no configuration is given, module will fail to run.
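+
+For illustration, these are the three API endpoints the module polls (a
+minimal sketch using `requests`; the host and port below are assumptions,
+9200 being the elasticsearch default):
+
+```python
+import requests
+
+base = 'http://127.0.0.1:9200'
+node_stats = requests.get(base + '/_nodes/_local/stats').json()
+health = requests.get(base + '/_cluster/health').json()
+stats = requests.get(base + '/_cluster/stats').json()
+print(health['status'], health['number_of_nodes'])
+```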
+
+---
+
# exim
Simple module executing `exim -bpc` to grab exim queue.
@@ -233,6 +378,151 @@ Configuration is not needed.
---
+# fail2ban
+
+The module monitors the fail2ban log file and shows all bans for all active jails.
+
+**Requirements:**
+ * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf in logrotate.d)
+
+It produces one chart with multiple lines (one line per jail)
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
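+
+For illustration, the per-jail counting amounts to a substring match on new
+log lines (a minimal sketch; a ban is logged as `... [<jail>] Ban <ip>`):
+
+```python
+def count_bans(new_lines, jails=('ssh',)):
+    return {jail: sum(1 for line in new_lines if jail + '] Ban' in line)
+            for jail in jails}
+
+sample = ['2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231']
+print(count_bans(sample))   # {'ssh': 1}
+```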
+
+---
+
+# freeradius
+
+The module uses the `radclient` command to provide FreeRADIUS statistics. It is not recommended to run it every second.
+
+It produces:
+
+1. **Authentication counters:**
+ * access-accepts
+ * access-rejects
+ * auth-dropped-requests
+ * auth-duplicate-requests
+ * auth-invalid-requests
+ * auth-malformed-requests
+ * auth-unknown-types
+
+2. **Accounting counters:** [optional]
+ * accounting-requests
+ * accounting-responses
+ * acct-dropped-requests
+ * acct-duplicate-requests
+ * acct-invalid-requests
+ * acct-malformed-requests
+ * acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+ * proxy-access-accepts
+ * proxy-access-rejects
+ * proxy-auth-dropped-requests
+ * proxy-auth-duplicate-requests
+ * proxy-auth-invalid-requests
+ * proxy-auth-malformed-requests
+ * proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+ * cd sites-enabled
+ * ln -s ../sites-available/status status
+
+and restart/reload your FreeRADIUS server.
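+
+For illustration, a status query can be issued the way the module drives
+`radclient` (a minimal sketch; host, port and secret are the sample values
+above, and `FreeRADIUS-Statistics-Type = 15` requests all counter groups):
+
+```python
+import subprocess
+
+packet = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15'
+proc = subprocess.Popen(['radclient', '-r', '1', '-x', 'localhost:18121', 'status', 'adminsecret'],
+                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+out, _ = proc.communicate(packet.encode())
+print(out.decode())   # reply attributes such as FreeRADIUS-Total-Access-Accepts
+```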
+
+---
+
+# haproxy
+
+The module monitors frontend and backend metrics such as bytes in/out, current sessions, and sessions currently in queue,
+as well as health metrics such as backend server status (a server check should be used).
+
+The plugin can obtain data from a URL **OR** a unix socket.
+
+**Requirement:**
+Socket MUST be readable AND writable by netdata user.
+
+It produces:
+
+1. **Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+ pass : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, module will fail to run.
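+
+For illustration, the socket path boils down to sending `show stat` to the
+haproxy stats socket, which answers with the same CSV the `;csv` URL serves
+(a minimal sketch; the socket path below is an assumption):
+
+```python
+import socket
+
+def haproxy_csv(sock_path='/var/run/haproxy.sock'):
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    sock.connect(sock_path)
+    sock.sendall(b'show stat\n')
+    data = b''
+    while True:
+        chunk = sock.recv(4096)
+        if not chunk:
+            break
+        data += chunk
+    sock.close()
+    return data.decode()
+
+print(haproxy_csv().splitlines()[0])   # header row: # pxname,svname,qcur,scur,...
+```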
+
+---
+
# hddtemp
Module monitors disk temperatures from one or more hddtemp daemons.
@@ -281,6 +571,69 @@ localhost:
---
+# isc_dhcpd
+
+The module monitors the leases database and shows all active leases for the given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ leases_path : '/var/lib/dhcp/dhcpd.leases'
+ pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+```
+
+In case of python2 you need to install `py2-ipaddress` for the plugin to work.
+The module will not work if no configuration is given.
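+
+For illustration, the utilization math pairs active lease addresses with the
+configured CIDR pools (a minimal sketch using the stdlib `ipaddress` module;
+subtracting the network and broadcast addresses is an assumption about what
+counts as usable):
+
+```python
+import ipaddress
+
+def pool_utilization(pool_cidr, active_lease_ips):
+    pool = ipaddress.ip_network(pool_cidr)
+    inside = sum(1 for ip in active_lease_ips if ipaddress.ip_address(ip) in pool)
+    usable = max(pool.num_addresses - 2, 1)   # minus network/broadcast addresses
+    return 100.0 * inside / usable
+
+print(pool_utilization('192.168.3.0/24', ['192.168.3.10', '192.168.4.7']))   # ~0.39
+```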
+
+---
+
+
+# mdstat
+
+The module monitors `/proc/mdstat`.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array would ideally have)
+ * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+### configuration
+No configuration is needed.
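+
+For illustration, the health numbers come from the `[total/inuse]` token that
+`/proc/mdstat` prints per array (a minimal sketch on a canned sample):
+
+```python
+import re
+
+SAMPLE = """md0 : active raid1 sdb1[1] sda1[0]
+      1046528 blocks super 1.2 [2/1] [U_]
+"""
+
+for name, total, inuse in re.findall(r'^(md[0-9]+)\b.*?\[([0-9]+)/([0-9]+)\]', SAMPLE, re.M | re.S):
+    print(name, 'total:', total, 'inuse:', inuse, 'failed:', int(total) - int(inuse))
+```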
+
+---
+
# memcached
Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
@@ -515,6 +868,39 @@ When no configuration file is found, module tries to parse `/var/log/nginx/acces
---
+# ovpn_status_log
+
+The module monitors the openvpn-status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
+
+ * Make sure NETDATA USER CAN READ openvpn-status.log
+
+ * The `update_every` interval MUST MATCH the interval at which OpenVPN writes its operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
+ * in
+ * out
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
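+
+For illustration, the default (version 1) status file lists one client per
+row between the `Common Name,...` header and the `ROUTING TABLE` section (a
+minimal sketch of that parse):
+
+```python
+def parse_status(lines):
+    users = bytes_in = bytes_out = 0
+    in_clients = False
+    for line in lines:
+        if line.startswith('Common Name,'):
+            in_clients = True
+            continue
+        if line.startswith('ROUTING TABLE'):
+            break
+        if in_clients:
+            fields = line.strip().split(',')
+            users += 1
+            bytes_in += int(fields[2])    # Bytes Received column
+            bytes_out += int(fields[3])   # Bytes Sent column
+    return {'users': users, 'in': bytes_in, 'out': bytes_out}
+```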
+
+---
+
# phpfpm
This module will monitor one or more php-fpm instances depending on configuration.
@@ -699,3 +1085,69 @@ Without configuration, module attempts to connect to `http://localhost:8080/mana
So it will probably fail.
---
+
+# varnish cache
+
+The module uses the `varnishstat` command to provide varnish cache statistics.
+
+It produces:
+
+1. **Client metrics**
+ * session accepted
+ * session dropped
+ * good client requests received
+
+2. **All history hit rate ratio**
+ * cache hits in percent
+ * cache miss in percent
+ * cache hits for pass percent
+
+3. **Current poll hit rate ratio**
+ * cache hits in percent
+ * cache miss in percent
+ * cache hits for pass percent
+
+4. **Thread-related metrics** (only for varnish version 4+)
+ * total number of threads
+ * threads created
+ * threads creation failed
+ * threads hit max
+ * length of session queue
+ * sessions queued for thread
+
+5. **Backend health**
+ * backend conn. success
+ * backend conn. not attempted
+ * backend conn. too many
+ * backend conn. failures
+ * backend conn. reuses
+ * backend conn. recycles
+ * backend conn. retry
+ * backend requests made
+
+6. **Memory usage**
+ * memory available in megabytes
+ * memory allocated in megabytes
+
+7. **Problems summary**
+ * session dropped
+ * session accept failures
+ * session pipe overflow
+ * backend conn. not attempted
+ * fetch failed (all causes)
+ * backend conn. too many
+ * threads hit max
+ * threads destroyed
+ * length of session queue
+ * HTTP header overflows
+ * ESI parse errors
+ * ESI parse warnings
+
+8. **Uptime**
+ * varnish instance uptime in seconds
+
+### configuration
+
+No configuration is needed.
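+
+For illustration, `varnishstat -1` dumps every counter as
+`<name> <value> <per-sec> <description>`, which maps easily into a dict (a
+minimal sketch; `MAIN.uptime` is the varnish 4+ counter name):
+
+```python
+import subprocess
+
+raw = subprocess.check_output(['varnishstat', '-1']).decode()
+stats = {}
+for line in raw.splitlines():
+    fields = line.split()
+    if len(fields) >= 2 and fields[1].isdigit():
+        stats[fields[0]] = int(fields[1])
+
+print(stats.get('MAIN.uptime'))   # varnish instance uptime in seconds
+```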
+
+---
diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py
new file mode 100644
index 00000000..e11c751f
--- /dev/null
+++ b/python.d/bind_rndc.chart.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+# Description: bind rndc netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from re import compile, findall
+from os.path import getsize, isfile, split
+from os import access as is_accessible, R_OK
+from subprocess import Popen
+
+priority = 60000
+retries = 60
+update_every = 30
+
+DIRECTORIES = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
+NMS = ['requests', 'responses', 'success', 'auth_answer', 'nonauth_answer', 'nxrrset', 'failure',
+ 'nxdomain', 'recursion', 'duplicate', 'rejections']
+QUERIES = ['RESERVED0', 'A', 'NS', 'CNAME', 'SOA', 'PTR', 'MX', 'TXT', 'X25', 'AAAA', 'SRV', 'NAPTR',
+ 'A6', 'DS', 'RRSIG', 'DNSKEY', 'SPF', 'ANY', 'DLV']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+ self.regex_values = compile(r'([0-9]+) ([^\n]+)')
+ # self.options = ['Incoming Requests', 'Incoming Queries', 'Outgoing Queries',
+ # 'Name Server Statistics', 'Zone Maintenance Statistics', 'Resolver Statistics',
+ # 'Cache DB RRsets', 'Socket I/O Statistics']
+ self.options = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
+ self.regex_options = [r'(%s(?= \+\+)) \+\+([^\+]+)' % option for option in self.options]
+ try:
+ self.rndc = [''.join([directory, 'rndc']) for directory in DIRECTORIES
+ if isfile(''.join([directory, 'rndc']))][0]
+ except IndexError:
+ self.rndc = False
+
+ def check(self):
+ # We can't start without the 'rndc' command
+ if not self.rndc:
+ self.error('Command "rndc" not found')
+ return False
+
+ # We can't start if the stats file doesn't exist or isn't readable by the netdata user
+ if not is_accessible(self.named_stats_path, R_OK):
+ self.error('Cannot access file %s' % self.named_stats_path)
+ return False
+
+ size_before = getsize(self.named_stats_path)
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+ size_after = getsize(self.named_stats_path)
+
+ # We can't start if the netdata user has no permission to run 'rndc stats'
+ if not run_rndc.returncode:
+ # 'rndc' was found, the stats file exists and is readable, and we can run 'rndc stats'. Let's go!
+ self.create_charts()
+
+ # BIND APPENDS a dump on every 'rndc stats' run,
+ # so the stats file can grow VERY large if update_every is too small
+ dump_size_24hr = round(86400 / self.update_every * (int(size_after) - int(size_before)) / 1048576, 3)
+
+ # If update_every is too small we should WARN the user
+ if self.update_every < 30:
+ self.info('Update_every %s is NOT recommended for use. Increase the value to > 30' % self.update_every)
+
+ self.info('With current update_interval it will be + %s MB every 24hr. '
+ 'Don\'t forget to create logrotate conf file for %s' % (dump_size_24hr, self.named_stats_path))
+
+ self.info('Plugin was started successfully.')
+
+ return True
+ else:
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
+
+ def _get_raw_data(self):
+ """
+ Run 'rndc stats' and read last dump from named.stats
+ :return: tuple(
+ file.read() obj,
+ named.stats file size
+ )
+ """
+
+ try:
+ current_size = getsize(self.named_stats_path)
+ except OSError:
+ return None, None
+
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None, None
+
+ try:
+ with open(self.named_stats_path) as bind_rndc:
+ bind_rndc.seek(current_size)
+ result = bind_rndc.read()
+ except OSError:
+ return None, None
+ else:
+ return result, current_size
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+
+ raw_data, size = self._get_raw_data()
+
+ if raw_data is None:
+ return None
+
+ rndc_stats = dict()
+
+ # Result: dict.
+ # topic = Cache DB RRsets; body = A 178303 NS 86790 ... ; desc = A; value = 178303
+ # {'Cache DB RRsets': [('A', 178303), ('NS', 286790), ...],
+ # {Incoming Queries': [('RESERVED0', 8), ('A', 4557317680), ...],
+ # ......
+ for regex in self.regex_options:
+ rndc_stats.update({topic: [(desc, int(value)) for value, desc in self.regex_values.findall(body)]
+ for topic, body in findall(regex, raw_data)})
+
+ nms = dict(rndc_stats.get('Name Server Statistics', []))
+
+ inc_queries = {'i' + k: 0 for k in QUERIES}
+ inc_queries.update({'i' + k: v for k, v in rndc_stats.get('Incoming Queries', [])})
+ out_queries = {'o' + k: 0 for k in QUERIES}
+ out_queries.update({'o' + k: v for k, v in rndc_stats.get('Outgoing Queries', [])})
+
+ to_netdata = dict()
+ to_netdata['requests'] = sum([v for k, v in nms.items() if 'request' in k and 'received' in k])
+ to_netdata['responses'] = sum([v for k, v in nms.items() if 'responses' in k and 'sent' in k])
+ to_netdata['success'] = nms.get('queries resulted in successful answer', 0)
+ to_netdata['auth_answer'] = nms.get('queries resulted in authoritative answer', 0)
+ to_netdata['nonauth_answer'] = nms.get('queries resulted in non authoritative answer', 0)
+ to_netdata['nxrrset'] = nms.get('queries resulted in nxrrset', 0)
+ to_netdata['failure'] = sum([nms.get('queries resulted in SERVFAIL', 0), nms.get('other query failures', 0)])
+ to_netdata['nxdomain'] = nms.get('queries resulted in NXDOMAIN', 0)
+ to_netdata['recursion'] = nms.get('queries caused recursion', 0)
+ to_netdata['duplicate'] = nms.get('duplicate queries received', 0)
+ to_netdata['rejections'] = nms.get('recursive queries rejected', 0)
+ to_netdata['stats_size'] = size
+
+ to_netdata.update(inc_queries)
+ to_netdata.update(out_queries)
+ return to_netdata
+
+ def create_charts(self):
+ self.order = ['stats_size', 'bind_stats', 'incoming_q', 'outgoing_q']
+ self.definitions = {
+ 'bind_stats': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'Name Server Statistics', 'bind_rndc.stats', 'line'],
+ 'lines': [
+ ]},
+ 'incoming_q': {
+ 'options': [None, 'Incoming queries', 'queries','Incoming queries', 'bind_rndc.incq', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_q': {
+ 'options': [None, 'Outgoing queries', 'queries','Outgoing queries', 'bind_rndc.outq', 'line'],
+ 'lines': [
+ ]},
+ 'stats_size': {
+ 'options': [None, '%s file size' % split(self.named_stats_path)[1].capitalize(), 'megabytes',
+ '%s size' % split(self.named_stats_path)[1].capitalize(), 'bind_rndc.size', 'line'],
+ 'lines': [
+ ["stats_size", None, "absolute", 1, 1048576]
+ ]}
+ }
+ for elem in QUERIES:
+ self.definitions['incoming_q']['lines'].append(['i' + elem, elem, 'incremental'])
+ self.definitions['outgoing_q']['lines'].append(['o' + elem, elem, 'incremental'])
+
+ for elem in NMS:
+ self.definitions['bind_stats']['lines'].append([elem, None, 'incremental'])
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
index a9de5ced..c761aae8 100644
--- a/python.d/cpufreq.chart.py
+++ b/python.d/cpufreq.chart.py
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
# Description: cpufreq netdata python.d module
-# Author: Pawel Krupa (paulfantom)
+# Author: Pawel Krupa (paulfantom) and Steven Noonan (tycho)
+import glob
import os
+import time
from base import SimpleService
# default module values (can be overridden per job in `config`)
@@ -18,29 +20,53 @@ CHARTS = {
]}
}
-
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
prefix = os.getenv('NETDATA_HOST_PREFIX', "")
if prefix.endswith('/'):
prefix = prefix[:-1]
self.sys_dir = prefix + "/sys/devices"
- self.filename = "scaling_cur_freq"
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self._orig_name = ""
self.assignment = {}
- self.paths = []
+ self.accurate_exists = True
+ self.accurate_last = {}
def _get_data(self):
- raw = {}
- for path in self.paths:
- with open(path, 'r') as f:
- raw[path] = f.read()
data = {}
- for path in self.paths:
- data[self.assignment[path]] = raw[path]
+
+ if self.accurate_exists:
+ elapsed = time.time() - self.timetable['last']
+
+ accurate_ok = True
+
+ for name, paths in self.assignment.items():
+ last = self.accurate_last[name]
+ current = 0
+ for line in open(paths['accurate'], 'r'):
+ line = list(map(int, line.split()))
+ current += (line[0] * line[1]) / 100
+ delta = current - last
+ data[name] = delta
+ self.accurate_last[name] = current
+ if delta == 0 or abs(delta) > 1e7:
+ # Delta is either too large or nonexistent, fall back to
+ # less accurate reading. This can happen if we switch
+ # to/from the 'schedutil' governor, which doesn't report
+ # stats.
+ accurate_ok = False
+
+ if accurate_ok:
+ return data
+ else:
+ self.alert("accurate method failed, falling back")
+
+
+ for name, paths in self.assignment.items():
+ data[name] = open(paths['inaccurate'], 'r').read()
+
return data
def check(self):
@@ -51,23 +77,30 @@ class Service(SimpleService):
self._orig_name = self.chart_name
- for dirpath, _, filenames in os.walk(self.sys_dir):
- if self.filename in filenames:
- self.paths.append(dirpath + "/" + self.filename)
-
- if len(self.paths) == 0:
- self.error("cannot find", self.filename)
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'):
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['accurate'] = path
+ self.accurate_last[cpu] = 0
+
+ if len(self.assignment) == 0:
+ self.accurate_exists = False
+
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
+ path_elem = path.split('/')
+ cpu = path_elem[-3]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['inaccurate'] = path
+
+ if len(self.assignment) == 0:
+ self.error("couldn't find a method to read cpufreq statistics")
return False
- self.paths.sort()
- i = 0
- for path in self.paths:
- self.assignment[path] = "cpu" + str(i)
- i += 1
-
- for name in self.assignment:
- dim = self.assignment[name]
- self.definitions[ORDER[0]]['lines'].append([dim, dim, 'absolute', 1, 1000])
+ for name in self.assignment.keys():
+ self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
return True
diff --git a/python.d/cpuidle.chart.py b/python.d/cpuidle.chart.py
new file mode 100644
index 00000000..f7199aeb
--- /dev/null
+++ b/python.d/cpuidle.chart.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Description: cpuidle netdata python.d module
+# Author: Steven Noonan (tycho)
+
+import glob
+import os
+import platform
+import time
+from base import SimpleService
+
+import ctypes
+syscall = ctypes.CDLL('libc.so.6').syscall
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ prefix = os.getenv('NETDATA_HOST_PREFIX', "")
+ if prefix.endswith('/'):
+ prefix = prefix[:-1]
+ self.sys_dir = prefix + "/sys/devices/system/cpu"
+ self.schedstat_path = prefix + "/proc/schedstat"
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = []
+ self.definitions = {}
+ self._orig_name = ""
+ self.assignment = {}
+
+ def __gettid(self):
+ # This is horrendous. We need the *thread id* (not the *process id*),
+ # but there's no Python standard library way of doing that. If you need
+ # to enable this module on a non-x86 machine type, you'll have to find
+ # the Linux syscall number for gettid() and add it to the dictionary
+ # below.
+ syscalls = {
+ 'i386': 224,
+ 'x86_64': 186,
+ }
+ if platform.machine() not in syscalls:
+ return None
+ tid = syscall(syscalls[platform.machine()])
+ return tid
+
+ def __wake_cpus(self):
+ # Requires Python 3.3+. This will "tickle" each CPU to force it to
+ # update its idle counters.
+ if hasattr(os, 'sched_setaffinity'):
+ pid = self.__gettid()
+ save_affinity = os.sched_getaffinity(pid)
+ for idx in range(0, len(self.assignment)):
+ os.sched_setaffinity(pid, [idx])
+ os.sched_getaffinity(pid)
+ os.sched_setaffinity(pid, save_affinity)
+
+ def __read_schedstat(self):
+ cpus = {}
+ for line in open(self.schedstat_path, 'r'):
+ if not line.startswith('cpu'):
+ continue
+ line = line.rstrip().split()
+ cpu = line[0]
+ active_time = line[7]
+ cpus[cpu] = int(active_time) // 1000
+ return cpus
+
+ def _get_data(self):
+ results = {}
+
+ # This line is critical for the stats to update. If we don't "tickle"
+ # all the CPUs, then all the counters stop counting.
+ self.__wake_cpus()
+
+ # Use the kernel scheduler stats to determine how much time was spent
+ # in C0 (active).
+ schedstat = self.__read_schedstat()
+
+ for cpu, metrics in self.assignment.items():
+ update_time = schedstat[cpu]
+ results[cpu + '_active_time'] = update_time
+
+ for metric, path in metrics.items():
+ residency = int(open(path, 'r').read())
+ results[metric] = residency
+
+ return results
+
+ def check(self):
+ if self.__gettid() is None:
+ self.error("Cannot get thread ID. Stats would be completely broken.")
+ return False
+
+ self._orig_name = self.chart_name
+
+ for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
+ # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ state = path_elem[-2]
+ statename = open(path, 'rt').read().rstrip()
+
+ orderid = '%s_cpuidle' % (cpu,)
+ if orderid not in self.definitions:
+ self.order.append(orderid)
+ active_name = '%s_active_time' % (cpu,)
+ self.definitions[orderid] = {
+ 'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
+ 'lines': [
+ [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
+ ],
+ }
+ self.assignment[cpu] = {}
+
+ defid = '%s_%s_time' % (orderid, state)
+
+ self.definitions[orderid]['lines'].append(
+ [defid, statename, 'percentage-of-incremental-row', 1, 1]
+ )
+
+ self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
+
+ # Sort order by kernel-specified CPU index
+ self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
+
+ if len(self.definitions) == 0:
+ self.error("couldn't find cstate stats")
+ return False
+
+ return True
+
+ def create(self):
+ self.chart_name = "cpu"
+ status = SimpleService.create(self)
+ self.chart_name = self._orig_name
+ return status
+
+ def update(self, interval):
+ self.chart_name = "cpu"
+ status = SimpleService.update(self, interval=interval)
+ self.chart_name = self._orig_name
+ return status
+
+# vim: set ts=4 sts=4 sw=4 et:
diff --git a/python.d/dovecot.chart.py b/python.d/dovecot.chart.py
index 586478cd..60e8bf6e 100644
--- a/python.d/dovecot.chart.py
+++ b/python.d/dovecot.chart.py
@@ -10,74 +10,78 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['sessions', 'commands',
+ORDER = ['sessions', 'logins', 'commands',
'faults',
'context_switches',
- 'disk', 'bytes', 'syscalls',
+ 'io', 'net', 'syscalls',
'lookup', 'cache',
'auth', 'auth_cache']
CHARTS = {
'sessions': {
- 'options': [None, "logins and sessions", 'number', 'IMAP', 'dovecot.sessions', 'line'],
+ 'options': [None, "Dovecot Active Sessions", 'number', 'sessions', 'dovecot.sessions', 'line'],
'lines': [
- ['num_logins', 'logins', 'absolute'],
['num_connected_sessions', 'active sessions', 'absolute']
]},
+ 'logins': {
+ 'options': [None, "Dovecot Logins", 'number', 'logins', 'dovecot.logins', 'line'],
+ 'lines': [
+ ['num_logins', 'logins', 'absolute']
+ ]},
'commands': {
- 'options': [None, "commands", "commands", 'IMAP', 'dovecot.commands', 'line'],
+ 'options': [None, "Dovecot Commands", "commands", 'commands', 'dovecot.commands', 'line'],
'lines': [
['num_cmds', 'commands', 'absolute']
]},
'faults': {
- 'options': [None, "faults", "faults", 'Faults', 'dovecot.faults', 'line'],
+ 'options': [None, "Dovecot Page Faults", "faults", 'page faults', 'dovecot.faults', 'line'],
'lines': [
['min_faults', 'minor', 'absolute'],
['maj_faults', 'major', 'absolute']
]},
'context_switches': {
- 'options': [None, "context switches", '', 'Context Switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
['vol_cs', 'volountary', 'absolute'],
['invol_cs', 'involountary', 'absolute']
]},
- 'disk': {
- 'options': [None, "disk", 'bytes/s', 'Reads and Writes', 'dovecot.disk', 'line'],
+ 'io': {
+ 'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
'lines': [
- ['disk_input', 'read', 'incremental'],
- ['disk_output', 'write', 'incremental']
+ ['disk_input', 'read', 'incremental', 1, 1024],
+ ['disk_output', 'write', 'incremental', -1, 1024]
]},
- 'bytes': {
- 'options': [None, "bytes", 'bytes/s', 'Reads and Writes', 'dovecot.bytes', 'line'],
+ 'net': {
+ 'options': [None, "Dovecot Network Bandwidth", 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [
- ['read_bytes', 'read', 'incremental'],
- ['write_bytes', 'write', 'incremental']
+ ['read_bytes', 'read', 'incremental', 8, 1024],
+ ['write_bytes', 'write', 'incremental', -8, 1024]
]},
'syscalls': {
- 'options': [None, "number of syscalls", 'syscalls/s', 'Reads and Writes', 'dovecot.syscalls', 'line'],
+ 'options': [None, "Dovecot Number of SysCalls", 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
'lines': [
['read_count', 'read', 'incremental'],
['write_count', 'write', 'incremental']
]},
'lookup': {
- 'options': [None, "lookups", 'number/s', 'Mail', 'dovecot.lookup', 'line'],
+ 'options': [None, "Dovecot Lookups", 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
'lines': [
['mail_lookup_path', 'path', 'incremental'],
['mail_lookup_attr', 'attr', 'incremental']
]},
'cache': {
- 'options': [None, "hits", 'hits/s', 'Mail', 'dovecot.cache', 'line'],
+ 'options': [None, "Dovecot Cache Hits", 'hits/s', 'cache', 'dovecot.cache', 'line'],
'lines': [
['mail_cache_hits', 'hits', 'incremental']
]},
'auth': {
- 'options': [None, "attempts", 'attempts', 'Authentication', 'dovecot.auth', 'stacked'],
+ 'options': [None, "Dovecot Authentications", 'attempts', 'logins', 'dovecot.auth', 'stacked'],
'lines': [
- ['auth_successes', 'success', 'absolute'],
- ['auth_failures', 'failure', 'absolute']
+ ['auth_successes', 'ok', 'absolute'],
+ ['auth_failures', 'failed', 'absolute']
]},
'auth_cache': {
- 'options': [None, "cache", 'number', 'Authentication', 'dovecot.auth_cache', 'stacked'],
+ 'options': [None, "Dovecot Authentication Cache", 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
'lines': [
['auth_cache_hits', 'hit', 'absolute'],
['auth_cache_misses', 'miss', 'absolute']
@@ -106,6 +110,10 @@ class Service(SocketService):
except (ValueError, AttributeError):
return None
+ if raw is None:
+ self.debug("dovecot returned no data")
+ return None
+
data = raw.split('\n')[:2]
desc = data[0].split('\t')
vals = data[1].split('\t')
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
new file mode 100644
index 00000000..ff841f17
--- /dev/null
+++ b/python.d/elasticsearch.chart.py
@@ -0,0 +1,402 @@
+# -*- coding: utf-8 -*-
+# Description: elastic search node stats netdata python.d module
+# Author: l2isbad
+
+from base import UrlService
+from requests import get
+from socket import gethostbyname
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from threading import Thread
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+update_every = 5
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['search_perf_total', 'search_perf_time', 'search_latency', 'index_perf_total', 'index_perf_time',
+ 'index_latency', 'jvm_mem_heap', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
+ 'host_metrics_http', 'host_metrics_transport', 'thread_pool_qr', 'fdata_cache', 'fdata_ev_tr',
+ 'cluster_health_status', 'cluster_health_nodes', 'cluster_health_shards', 'cluster_stats_nodes',
+ 'cluster_stats_query_cache', 'cluster_stats_docs', 'cluster_stats_store', 'cluster_stats_indices_shards']
+
+CHARTS = {
+ 'search_perf_total': {
+ 'options': [None, 'Number of queries, fetches', 'queries', 'Search performance', 'es.search_query', 'stacked'],
+ 'lines': [
+ ['query_total', 'search_total', 'incremental'],
+ ['fetch_total', 'fetch_total', 'incremental'],
+ ['query_current', 'search_current', 'absolute'],
+ ['fetch_current', 'fetch_current', 'absolute']
+ ]},
+ 'search_perf_time': {
+ 'options': [None, 'Time spent on queries, fetches', 'seconds', 'Search performance', 'es.search_time', 'stacked'],
+ 'lines': [
+ ['query_time_in_millis', 'query', 'incremental', 1, 1000],
+ ['fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
+ ]},
+ 'search_latency': {
+ 'options': [None, 'Query and fetch latency', 'ms', 'Search performance', 'es.search_latency', 'stacked'],
+ 'lines': [
+ ['query_latency', 'query', 'absolute', 1, 1000],
+ ['fetch_latency', 'fetch', 'absolute', 1, 1000]
+ ]},
+ 'index_perf_total': {
+ 'options': [None, 'Number of documents indexed, index refreshes, flushes', 'documents/indexes',
+ 'Indexing performance', 'es.index_doc', 'stacked'],
+ 'lines': [
+ ['indexing_index_total', 'indexed', 'incremental'],
+ ['refresh_total', 'refreshes', 'incremental'],
+ ['flush_total', 'flushes', 'incremental'],
+ ['indexing_index_current', 'indexed_current', 'absolute'],
+ ]},
+ 'index_perf_time': {
+ 'options': [None, 'Time spent on indexing, refreshing, flushing', 'seconds', 'Indexing performance',
+ 'es.index_time', 'stacked'],
+ 'lines': [
+ ['indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+ ['refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+ ['flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+ ]},
+ 'index_latency': {
+ 'options': [None, 'Indexing and flushing latency', 'ms', 'Indexing performance',
+ 'es.index_latency', 'stacked'],
+ 'lines': [
+ ['indexing_latency', 'indexing', 'absolute', 1, 1000],
+ ['flushing_latency', 'flushing', 'absolute', 1, 1000]
+ ]},
+ 'jvm_mem_heap': {
+ 'options': [None, 'JVM heap currently in use/committed', 'percent/MB', 'Memory usage and gc',
+ 'es.jvm_heap', 'area'],
+ 'lines': [
+ ['jvm_heap_percent', 'inuse', 'absolute'],
+ ['jvm_heap_commit', 'commit', 'absolute', -1, 1048576]
+ ]},
+ 'jvm_gc_count': {
+ 'options': [None, 'Count of garbage collections', 'counts', 'Memory usage and gc', 'es.gc_count', 'stacked'],
+ 'lines': [
+ ['young_collection_count', 'young', 'incremental'],
+ ['old_collection_count', 'old', 'incremental']
+ ]},
+ 'jvm_gc_time': {
+ 'options': [None, 'Time spent on garbage collections', 'ms', 'Memory usage and gc', 'es.gc_time', 'stacked'],
+ 'lines': [
+ ['young_collection_time_in_millis', 'young', 'incremental'],
+ ['old_collection_time_in_millis', 'old', 'incremental']
+ ]},
+ 'thread_pool_qr': {
+ 'options': [None, 'Number of queued/rejected threads in thread pool', 'threads', 'Queues and rejections',
+ 'es.qr', 'stacked'],
+ 'lines': [
+ ['bulk_queue', 'bulk_queue', 'absolute'],
+ ['index_queue', 'index_queue', 'absolute'],
+ ['search_queue', 'search_queue', 'absolute'],
+ ['merge_queue', 'merge_queue', 'absolute'],
+ ['bulk_rejected', 'bulk_rej', 'absolute'],
+ ['index_rejected', 'index_rej', 'absolute'],
+ ['search_rejected', 'search_rej', 'absolute'],
+ ['merge_rejected', 'merge_rej', 'absolute']
+ ]},
+ 'fdata_cache': {
+ 'options': [None, 'Fielddata cache size', 'MB', 'Fielddata cache', 'es.fdata_cache', 'line'],
+ 'lines': [
+ ['index_fdata_mem', 'mem_size', 'absolute', 1, 1048576]
+ ]},
+ 'fdata_ev_tr': {
+ 'options': [None, 'Fielddata evictions and circuit breaker tripped count', 'number of events',
+ 'Fielddata cache', 'es.fdata_ev_tr', 'line'],
+ 'lines': [
+ ['index_fdata_evic', 'evictions', 'incremental'],
+ ['breakers_fdata_trip', 'tripped', 'incremental']
+ ]},
+ 'cluster_health_nodes': {
+ 'options': [None, 'Nodes and tasks statistics', 'units', 'Cluster health API',
+ 'es.cluster_health', 'stacked'],
+ 'lines': [
+ ['health_number_of_nodes', 'nodes', 'absolute'],
+ ['health_number_of_data_nodes', 'data_nodes', 'absolute'],
+ ['health_number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ['health_number_of_in_flight_fetch', 'inflight_fetch', 'absolute']
+ ]},
+ 'cluster_health_status': {
+ 'options': [None, 'Cluster status', 'status', 'Cluster health API',
+ 'es.cluster_health_status', 'area'],
+ 'lines': [
+ ['status_green', 'green', 'absolute'],
+ ['status_red', 'red', 'absolute'],
+ ['status_foo1', None, 'absolute'],
+ ['status_foo2', None, 'absolute'],
+ ['status_foo3', None, 'absolute'],
+ ['status_yellow', 'yellow', 'absolute']
+ ]},
+ 'cluster_health_shards': {
+ 'options': [None, 'Shards statistics', 'shards', 'Cluster health API',
+ 'es.cluster_health_sharts', 'stacked'],
+ 'lines': [
+ ['health_active_shards', 'active_shards', 'absolute'],
+ ['health_relocating_shards', 'relocating_shards', 'absolute'],
+ ['health_unassigned_shards', 'unassigned', 'absolute'],
+ ['health_delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+ ['health_initializing_shards', 'initializing', 'absolute'],
+ ['health_active_shards_percent_as_number', 'active_percent', 'absolute']
+ ]},
+ 'cluster_stats_nodes': {
+ 'options': [None, 'Nodes statistics', 'nodes', 'Cluster stats API',
+ 'es.cluster_stats_nodes', 'stacked'],
+ 'lines': [
+ ['count_data_only', 'data_only', 'absolute'],
+ ['count_master_data', 'master_data', 'absolute'],
+ ['count_total', 'total', 'absolute'],
+ ['count_master_only', 'master_only', 'absolute'],
+ ['count_client', 'client', 'absolute']
+ ]},
+ 'cluster_stats_query_cache': {
+ 'options': [None, 'Query cache statistics', 'queries', 'Cluster stats API',
+ 'es.cluster_stats_query_cache', 'stacked'],
+ 'lines': [
+ ['query_cache_hit_count', 'hit', 'incremental'],
+ ['query_cache_miss_count', 'miss', 'incremental']
+ ]},
+ 'cluster_stats_docs': {
+ 'options': [None, 'Docs statistics', 'count', 'Cluster stats API',
+ 'es.cluster_stats_docs', 'line'],
+ 'lines': [
+ ['docs_count', 'docs', 'absolute']
+ ]},
+ 'cluster_stats_store': {
+ 'options': [None, 'Store statistics', 'MB', 'Cluster stats API',
+ 'es.cluster_stats_store', 'line'],
+ 'lines': [
+ ['store_size_in_bytes', 'size', 'absolute', 1, 1048576]
+ ]},
+ 'cluster_stats_indices_shards': {
+ 'options': [None, 'Indices and shards statistics', 'count', 'Cluster stats API',
+ 'es.cluster_stats_ind_sha', 'stacked'],
+ 'lines': [
+ ['indices_count', 'indices', 'absolute'],
+ ['shards_total', 'shards', 'absolute']
+ ]},
+ 'host_metrics_transport': {
+ 'options': [None, 'Cluster communication transport metrics', 'kbit/s', 'Host metrics',
+ 'es.host_metrics_transport', 'area'],
+ 'lines': [
+ ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
+ ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
+ ]},
+ 'host_metrics_file_descriptors': {
+ 'options': [None, 'Available file descriptors in percent', 'percent', 'Host metrics',
+ 'es.host_metrics_descriptors', 'area'],
+ 'lines': [
+ ['file_descriptors_used', 'used', 'absolute', 1, 10]
+ ]},
+ 'host_metrics_http': {
+ 'options': [None, 'Opened HTTP connections', 'connections', 'Host metrics',
+ 'es.host_metrics_http', 'line'],
+ 'lines': [
+ ['http_current_open', 'opened', 'absolute', 1, 1]
+ ]}
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.latency = dict()
+
+ def check(self):
+ # We can't start if <host> AND <port> not specified
+ if not all([self.host, self.port]):
+ return False
+
+ # It is a bad idea to use a hostname.
+ # Hostname -> ipaddress
+ try:
+ self.host = gethostbyname(self.host)
+ except Exception as e:
+ self.error(str(e))
+ return False
+
+ # HTTP Auth? NOT TESTED
+ self.auth = self.user and self.password
+
+ # Create URL for every Elasticsearch API
+ url_node_stats = 'http://%s:%s/_nodes/_local/stats' % (self.host, self.port)
+ url_cluster_health = 'http://%s:%s/_cluster/health' % (self.host, self.port)
+ url_cluster_stats = 'http://%s:%s/_cluster/stats' % (self.host, self.port)
+
+ # Create list of enabled API calls
+ user_choice = [bool(self.configuration.get('node_stats', True)),
+ bool(self.configuration.get('cluster_health', True)),
+ bool(self.configuration.get('cluster_stats', True))]
+
+ avail_methods = [(self._get_node_stats, url_node_stats),
+ (self._get_cluster_health, url_cluster_health),
+ (self._get_cluster_stats, url_cluster_stats)]
+
+ # Remove disabled API calls from 'avail methods'
+ self.methods = [avail_methods[_] for _ in range(len(avail_methods)) if user_choice[_]]
+
+ # Run _get_data for ALL active API calls.
+ api_result = {}
+ for method in self.methods:
+ api_result[method[1]] = (bool(self._get_raw_data(method[1])))
+
+ # We can start ONLY if all active API calls returned NOT None
+ if not all(api_result.values()):
+ self.error('Plugin could not get data from all APIs')
+ self.error('%s' % api_result)
+ return False
+ else:
+ self.info('%s' % api_result)
+ self.info('Plugin was started successfully')
+
+ return True
+
+ def _get_raw_data(self, url):
+ try:
+ if not self.auth:
+ raw_data = get(url)
+ else:
+ raw_data = get(url, auth=(self.user, self.password))
+ except Exception:
+ return None
+
+ return raw_data
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method[0], args=(queue, method[1]))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_cluster_health(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ data = self._get_raw_data(url)
+
+ if not data:
+ queue.put({})
+ else:
+ data = data.json()
+
+ to_netdata = dict()
+ to_netdata.update(update_key('health', data))
+ to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
+ 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
+ to_netdata[''.join(['status_', to_netdata.get('health_status', '')])] = 1
+
+ queue.put(to_netdata)
+
+ def _get_cluster_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ data = self._get_raw_data(url)
+
+ if not data:
+ queue.put({})
+ else:
+ data = data.json()
+
+ to_netdata = dict()
+ to_netdata.update(update_key('count', data['nodes']['count']))
+ to_netdata.update(update_key('query_cache', data['indices']['query_cache']))
+ to_netdata.update(update_key('docs', data['indices']['docs']))
+ to_netdata.update(update_key('store', data['indices']['store']))
+ to_netdata['indices_count'] = data['indices']['count']
+ to_netdata['shards_total'] = data['indices']['shards']['total']
+
+ queue.put(to_netdata)
+
+ def _get_node_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ data = self._get_raw_data(url)
+
+ if not data:
+ queue.put({})
+ else:
+ data = data.json()
+ node = list(data['nodes'].keys())[0]
+ to_netdata = dict()
+ # Search performance metrics
+ to_netdata.update(data['nodes'][node]['indices']['search'])
+ to_netdata['query_latency'] = self.find_avg(to_netdata['query_total'],
+ to_netdata['query_time_in_millis'], 'query_latency')
+ to_netdata['fetch_latency'] = self.find_avg(to_netdata['fetch_total'],
+ to_netdata['fetch_time_in_millis'], 'fetch_latency')
+
+ # Indexing performance metrics
+ for key in ['indexing', 'refresh', 'flush']:
+ to_netdata.update(update_key(key, data['nodes'][node]['indices'].get(key, {})))
+ to_netdata['indexing_latency'] = self.find_avg(to_netdata['indexing_index_total'],
+ to_netdata['indexing_index_time_in_millis'], 'index_latency')
+ to_netdata['flushing_latency'] = self.find_avg(to_netdata['flush_total'],
+ to_netdata['flush_total_time_in_millis'], 'flush_latency')
+ # Memory usage and garbage collection
+ to_netdata.update(update_key('young', data['nodes'][node]['jvm']['gc']['collectors']['young']))
+ to_netdata.update(update_key('old', data['nodes'][node]['jvm']['gc']['collectors']['old']))
+ to_netdata['jvm_heap_percent'] = data['nodes'][node]['jvm']['mem']['heap_used_percent']
+ to_netdata['jvm_heap_commit'] = data['nodes'][node]['jvm']['mem']['heap_committed_in_bytes']
+
+ # Thread pool queues and rejections
+ for key in ['bulk', 'index', 'search', 'merge']:
+ to_netdata.update(update_key(key, data['nodes'][node]['thread_pool'].get(key, {})))
+
+ # Fielddata cache
+ to_netdata['index_fdata_mem'] = data['nodes'][node]['indices']['fielddata']['memory_size_in_bytes']
+ to_netdata['index_fdata_evic'] = data['nodes'][node]['indices']['fielddata']['evictions']
+ to_netdata['breakers_fdata_trip'] = data['nodes'][node]['breakers']['fielddata']['tripped']
+
+ # Host metrics
+ to_netdata.update(update_key('http', data['nodes'][node]['http']))
+ to_netdata.update(update_key('transport', data['nodes'][node]['transport']))
+ to_netdata['file_descriptors_used'] = round(float(data['nodes'][node]['process']['open_file_descriptors'])
+ / data['nodes'][node]['process']['max_file_descriptors'] * 1000)
+
+ queue.put(to_netdata)
+
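+ # find_avg returns the average latency over the last collection interval:
+ # it keeps the previous (count, time_in_millis) pair per key and returns
+ # round(delta_time / delta_count * 1000). Illustrative numbers (not from
+ # the source): counts 100 -> 110 and times 500 -> 720 give
+ # round(220 / 10 * 1000) = 22000, i.e. 22 ms per query once the chart's
+ # presumed divisor of 1000 is applied.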
+ def find_avg(self, value1, value2, key):
+ if key not in self.latency:
+ self.latency.update({key: [value1, value2]})
+ return 0
+ else:
+ if self.latency[key][0] != value1:
+ latency = round(float(value2 - self.latency[key][1]) / float(value1 - self.latency[key][0]) * 1000)
+ self.latency.update({key: [value1, value2]})
+ return latency
+ else:
+ self.latency.update({key: [value1, value2]})
+ return 0
+
+
+def update_key(string, dictionary):
+ return {'_'.join([string, k]): v for k, v in dictionary.items()}
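+# Example: update_key('docs', {'count': 5, 'deleted': 0}) -> {'docs_count': 5, 'docs_deleted': 0}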
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
new file mode 100644
index 00000000..2d80282c
--- /dev/null
+++ b/python.d/fail2ban.chart.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: l2isbad
+
+from base import LogService
+from re import compile
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+from os import access as is_accessible, R_OK
+
+priority = 60000
+retries = 60
+regex = compile(r'([A-Za-z-]+\]) enabled = ([a-z]+)')
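+# Matches flattened jail.local entries such as 'ssh] enabled = true';
+# the captured jail name keeps its closing bracket, which check() strips
+# with jail[:-1].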
+
+ORDER = ['jails_group']
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.default_jails = ['ssh']
+ try:
+ self.exclude = self.configuration['exclude'].split()
+ except (KeyError, AttributeError):
+ self.exclude = []
+
+ def _get_data(self):
+ """
+ Parse new log lines
+ :return: dict
+ """
+
+ # If _get_raw_data returns an empty list (no new lines in the log file),
+ # these zeroes are what we send to Netdata
+ self.data = {jail: 0 for jail in self.jails_list}
+
+ try:
+ raw = self._get_raw_data()
+ if raw is None:
+ return None
+ elif not raw:
+ return self.data
+ except (ValueError, AttributeError):
+ return None
+
+ # Fail2ban log lines look like:
+ # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
+ self.data = dict(
+ zip(
+ self.jails_list,
+ [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
+ ))
+
+ return self.data
+
+ def check(self):
+
+ # Check "log_path" is accessible.
+ # If NOT STOP plugin
+ if not is_accessible(self.log_path, R_OK):
+ self.error('Cannot access file %s' % (self.log_path))
+ return False
+
+ # Check "conf_path" is accessible.
+ # If "conf_path" is accesible try to parse it to find enabled jails
+ if is_accessible(self.conf_path, R_OK):
+ with open(self.conf_path, 'rt') as jails_conf:
+ jails_list = regex.findall(' '.join(jails_conf.read().split()))
+ self.jails_list = [jail[:-1] for jail, status in jails_list if status == 'true']
+ else:
+ self.jails_list = []
+ self.error('Cannot access jail.local file %s.' % (self.conf_path))
+
+ # If for some reason parsing failed, we can still START with the default jails_list.
+ self.jails_list = [jail for jail in self.jails_list if jail not in self.exclude]\
+ if self.jails_list else self.default_jails
+ self.create_dimensions()
+ self.info('Plugin successfully started. Jails: %s' % (self.jails_list))
+ return True
+
+ def create_dimensions(self):
+ self.definitions = {'jails_group':
+ {'options':
+ [None, "Jails ban statistics", "bans/s", 'Jails', 'jail.ban', 'line'], 'lines': []}}
+ for jail in self.jails_list:
+ self.definitions['jails_group']['lines'].append([jail, jail, 'absolute'])
diff --git a/python.d/freeradius.chart.py b/python.d/freeradius.chart.py
new file mode 100644
index 00000000..2ac280f0
--- /dev/null
+++ b/python.d/freeradius.chart.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Description: freeradius netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from os.path import isfile
+from re import findall
+from subprocess import Popen, PIPE
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 60
+update_every = 15
+directories = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
+
+CHARTS = {
+ 'authentication': {
+ 'options': [None, "Authentication", "packets/s", 'Authentication', 'freerad.auth', 'line'],
+ 'lines': [
+ ['access-accepts', None, 'incremental'], ['access-rejects', None, 'incremental'],
+ ['auth-dropped-requests', None, 'incremental'], ['auth-duplicate-requests', None, 'incremental'],
+ ['auth-invalid-requests', None, 'incremental'], ['auth-malformed-requests', None, 'incremental'],
+ ['auth-unknown-types', None, 'incremental']
+ ]},
+ 'accounting': {
+ 'options': [None, "Accounting", "packets/s", 'Accounting', 'freerad.acct', 'line'],
+ 'lines': [
+ ['accounting-requests', None, 'incremental'], ['accounting-responses', None, 'incremental'],
+ ['acct-dropped-requests', None, 'incremental'], ['acct-duplicate-requests', None, 'incremental'],
+ ['acct-invalid-requests', None, 'incremental'], ['acct-malformed-requests', None, 'incremental'],
+ ['acct-unknown-types', None, 'incremental']
+ ]},
+ 'proxy-auth': {
+ 'options': [None, "Proxy Authentication", "packets/s", 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'lines': [
+ ['proxy-access-accepts', None, 'incremental'], ['proxy-access-rejects', None, 'incremental'],
+ ['proxy-auth-dropped-requests', None, 'incremental'], ['proxy-auth-duplicate-requests', None, 'incremental'],
+ ['proxy-auth-invalid-requests', None, 'incremental'], ['proxy-auth-malformed-requests', None, 'incremental'],
+ ['proxy-auth-unknown-types', None, 'incremental']
+ ]},
+ 'proxy-acct': {
+ 'options': [None, "Proxy Accounting", "packets/s", 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'lines': [
+ ['proxy-accounting-requests', None, 'incremental'], ['proxy-accounting-responses', None, 'incremental'],
+ ['proxy-acct-dropped-requests', None, 'incremental'], ['proxy-acct-duplicate-requests', None, 'incremental'],
+ ['proxy-acct-invalid-requests', None, 'incremental'], ['proxy-acct-malformed-requests', None, 'incremental'],
+ ['proxy-acct-unknown-types', None, 'incremental']
+ ]}
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', '18121')
+ self.secret = self.configuration.get('secret', 'adminsecret')
+ self.acct = self.configuration.get('acct', False)
+ self.proxy_auth = self.configuration.get('proxy_auth', False)
+ self.proxy_acct = self.configuration.get('proxy_acct', False)
+ try:
+ self.echo = [''.join([directory, 'echo']) for directory in directories if isfile(''.join([directory, 'echo']))][0]
+ self.radclient = [''.join([directory, 'radclient']) for directory in directories if isfile(''.join([directory, 'radclient']))][0]
+ except IndexError:
+ self.echo = []
+ self.radclient = []
+ self.sub_echo = [self.echo, 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept']
+ self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', ':'.join([self.host, self.port]), 'status', self.secret]
+
+ def check(self):
+ if not all([self.echo, self.radclient]):
+ self.error('Command "echo" or "radclient" not found')
+ return False
+ if self._get_raw_data():
+ chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
+ self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
+ self.definitions = {k: v for k, v in CHARTS.items() if k in self.order}
+ self.info('Plugin was started successfully')
+ return True
+ else:
+ self.error('Request returned no data. Is server alive? Used options: host {}, port {}, secret {}'.format(self.host, self.port, self.secret))
+ return False
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ result = self._get_raw_data()
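+ # radclient output lines look like 'FreeRADIUS-Total-Access-Accepts = 3'
+ # (illustrative); the regex captures the stat name that follows a hyphen
+ # and starts with 'A' or 'P', plus its integer value, and lower-cases it
+ # to match the chart dimension ids.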
+ return {k.lower(): int(v) for k, v in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)}
+
+ def _get_raw_data(self):
+ """
+ The following code is equivalent to
+ 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept" | radclient -t 1 -r 1 host:port status secret'
+ :return: str
+ """
+ try:
+ process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
+ process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
+ process_echo.stdout.close()
+ raw_result = process_rad.communicate()[0]
+ except Exception:
+ return None
+ else:
+ if process_rad.returncode == 0:
+ return raw_result.decode()
+ else:
+ return None
diff --git a/python.d/gunicorn_log.chart.py b/python.d/gunicorn_log.chart.py
new file mode 100644
index 00000000..94596363
--- /dev/null
+++ b/python.d/gunicorn_log.chart.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Description: gunicorn log netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Modified for Gunicorn by: Jeff Willette (deltaskelta)
+
+from base import LogService
+import re
+
+priority = 60000
+retries = 60
+# update_every = 3
+
+ORDER = ['codes']
+CHARTS = {
+ 'codes': {
+ 'options': [None, 'gunicorn status codes', 'requests/s', 'requests', 'gunicorn_log.codes', 'stacked'],
+ 'lines': [
+ ["2xx", None, "incremental"],
+ ["3xx", None, "incremental"],
+ ["4xx", None, "incremental"],
+ ["5xx", None, "incremental"]
+ ]}
+}
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ if len(self.log_path) == 0:
+ self.log_path = "/var/log/gunicorn/access.log"
+ self.order = ORDER
+ self.definitions = CHARTS
+ pattern = r'" ([0-9]{3}) '
+ #pattern = r'(?:" )([0-9][0-9][0-9]) ?'
+ self.regex = re.compile(pattern)
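+ # Captures the HTTP status code from access-log lines such as
+ # '... "GET / HTTP/1.1" 200 1234' (assumes the default gunicorn
+ # access-log format, where the code follows the quoted request).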
+
+ def _get_data(self):
+ """
+ Parse new log lines
+ :return: dict
+ """
+ data = {'2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0}
+ try:
+ raw = self._get_raw_data()
+ if raw is None:
+ return None
+ elif not raw:
+ return data
+ except (ValueError, AttributeError):
+ return None
+
+ regex = self.regex
+ for line in raw:
+ code = regex.search(line)
+ try:
+ beginning = code.group(1)[0]
+ except AttributeError:
+ continue
+
+ if beginning == '2':
+ data["2xx"] += 1
+ elif beginning == '3':
+ data["3xx"] += 1
+ elif beginning == '4':
+ data["4xx"] += 1
+ elif beginning == '5':
+ data["5xx"] += 1
+
+ return data
diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py
new file mode 100644
index 00000000..2fb97d75
--- /dev/null
+++ b/python.d/haproxy.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: l2isbad
+
+from base import UrlService, SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur', 'health_sdown', 'health_bdown']
+CHARTS = {
+ 'fbin': {
+ 'options': [None, "Kilobytes in", "kilobytes in/s", 'Frontend', 'haproxy_f.bin', 'line'],
+ 'lines': [
+ ]},
+ 'fbout': {
+ 'options': [None, "Kilobytes out", "kilobytes out/s", 'Frontend', 'haproxy_f.bout', 'line'],
+ 'lines': [
+ ]},
+ 'fscur': {
+ 'options': [None, "Sessions active", "sessions", 'Frontend', 'haproxy_f.scur', 'line'],
+ 'lines': [
+ ]},
+ 'fqcur': {
+ 'options': [None, "Session in queue", "sessions", 'Frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': [
+ ]},
+ 'bbin': {
+ 'options': [None, "Kilobytes in", "kilobytes in/s", 'Backend', 'haproxy_b.bin', 'line'],
+ 'lines': [
+ ]},
+ 'bbout': {
+ 'options': [None, "Kilobytes out", "kilobytes out/s", 'Backend', 'haproxy_b.bout', 'line'],
+ 'lines': [
+ ]},
+ 'bscur': {
+ 'options': [None, "Sessions active", "sessions", 'Backend', 'haproxy_b.scur', 'line'],
+ 'lines': [
+ ]},
+ 'bqcur': {
+ 'options': [None, "Sessions in queue", "sessions", 'Backend', 'haproxy_b.qcur', 'line'],
+ 'lines': [
+ ]},
+ 'health_sdown': {
+ 'options': [None, "Number of servers in backend in DOWN state", "failed servers", 'Health', 'haproxy_hs.down', 'line'],
+ 'lines': [
+ ]},
+ 'health_bdown': {
+ 'options': [None, "Is backend alive? 1 = DOWN", "failed backend", 'Health', 'haproxy_hb.down', 'line'],
+ 'lines': [
+ ]}
+}
+
+
+class Service(UrlService, SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.request = 'show stat\n'
+ self.poll_method = (UrlService, SocketService)
+ self.order = ORDER
+ self.order_front = [_ for _ in ORDER if _.startswith('f')]
+ self.order_back = [_ for _ in ORDER if _.startswith('b')]
+ self.definitions = CHARTS
+ self.charts = True
+
+ def check(self):
+ if self.configuration.get('url'):
+ self.poll_method = self.poll_method[0]
+ url = self.configuration.get('url')
+ if not url.endswith(';csv;norefresh'):
+ self.error('Bad url(%s). Must be http://<ip.address>:<port>/<url>;csv;norefresh' % url)
+ return False
+ elif self.configuration.get('socket'):
+ self.poll_method = self.poll_method[1]
+ else:
+ self.error('Neither url nor socket is specified in the configuration')
+ return False
+
+ if self.poll_method.check(self):
+ self.info('Plugin was started successfully. We are using %s.' % self.poll_method.__name__)
+ return True
+ return False
+
+ def create_charts(self, front_ends, back_ends):
+ for frontend in front_ends:
+ name = frontend['# pxname']
+ self.definitions['fbin']['lines'].append(['_'.join(['fbin', name]), name, 'incremental', 1, 1024])
+ self.definitions['fbout']['lines'].append(['_'.join(['fbout', name]), name, 'incremental', 1, 1024])
+ self.definitions['fscur']['lines'].append(['_'.join(['fscur', name]), name, 'absolute'])
+ self.definitions['fqcur']['lines'].append(['_'.join(['fqcur', name]), name, 'absolute'])
+
+ for backend in back_ends:
+ name = backend['# pxname']
+ self.definitions['bbin']['lines'].append(['_'.join(['bbin', name]), name, 'incremental', 1, 1024])
+ self.definitions['bbout']['lines'].append(['_'.join(['bbout', name]), name, 'incremental', 1, 1024])
+ self.definitions['bscur']['lines'].append(['_'.join(['bscur', name]), name, 'absolute'])
+ self.definitions['bqcur']['lines'].append(['_'.join(['bqcur', name]), name, 'absolute'])
+ self.definitions['health_sdown']['lines'].append(['_'.join(['hsdown', name]), name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['_'.join(['hbdown', name]), name, 'absolute'])
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw_data = self.poll_method._get_raw_data(self).splitlines()
+ except Exception as e:
+ self.error(str(e))
+ return None
+
+ all_instances = [dict(zip(raw_data[0].split(','), row.split(','))) for row in raw_data[1:]]
+
+ back_ends = list(filter(is_backend, all_instances))
+ front_ends = list(filter(is_frontend, all_instances))
+ servers = list(filter(is_server, all_instances))
+
+ if self.charts:
+ self.create_charts(front_ends, back_ends)
+ self.charts = False
+
+ to_netdata = dict()
+
+ for frontend in front_ends:
+ for key in self.order_front:
+ to_netdata.update({'_'.join([key, frontend['# pxname']]): int(frontend[key[1:]]) if frontend.get(key[1:]) else 0})
+
+ for backend in back_ends:
+ for key in self.order_back:
+ to_netdata.update({'_'.join([key, backend['# pxname']]): int(backend[key[1:]]) if backend.get(key[1:]) else 0})
+
+ for idx in range(len(back_ends)):
+ to_netdata.update({'_'.join(['hsdown', back_ends[idx]['# pxname']]):
+ len([server for server in servers if is_server_down(server, back_ends, idx)])})
+ to_netdata.update({'_'.join(['hbdown', back_ends[idx]['# pxname']]): 1 if is_backend_down(back_ends, idx) else 0})
+
+ return to_netdata
+
+ def _check_raw_data(self, data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return not bool(data)
+
+def is_backend(backend):
+ try:
+ return backend['svname'] == 'BACKEND' and backend['# pxname'] != 'stats'
+ except Exception:
+ return False
+
+def is_frontend(frontend):
+ try:
+ return frontend['svname'] == 'FRONTEND' and frontend['# pxname'] != 'stats'
+ except Exception:
+ return False
+
+def is_server(server):
+ try:
+ return not server['svname'].startswith(('FRONTEND', 'BACKEND'))
+ except Exception:
+ return False
+
+def is_server_down(server, back_ends, idx):
+ try:
+ return server['# pxname'] == back_ends[idx]['# pxname'] and server['status'] == 'DOWN'
+ except Exception:
+ return False
+
+def is_backend_down(back_ends, idx):
+ try:
+ return back_ends[idx]['status'] == 'DOWN'
+ except Exception:
+ return False
diff --git a/python.d/hddtemp.chart.py b/python.d/hddtemp.chart.py
index 465bfdfa..8a98995b 100644
--- a/python.d/hddtemp.chart.py
+++ b/python.d/hddtemp.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: hddtemp netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Modified by l2isbad
import os
from base import SocketService
@@ -21,15 +22,6 @@ retries = 60
ORDER = ['temperatures']
-CHARTS = {
- 'temperatures': {
- 'options': ['disks_temp', 'temperature', 'Celsius', 'Disks temperature', 'hddtemp.temperatures', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]}
-}
-
-
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
@@ -38,7 +30,10 @@ class Service(SocketService):
self.host = "127.0.0.1"
self.port = 7634
self.order = ORDER
- self.definitions = CHARTS
+ self.fahrenheit = ('Fahrenheit', lambda x: x * 9 / 5 + 32) if self.configuration.get('fahrenheit') else False
+ self.whatever = ('Whatever', lambda x: x * 33 / 22 + 11) if self.configuration.get('whatever') else False
+ self.choice = (choice for choice in [self.fahrenheit, self.whatever] if choice)
+ self.calc = lambda x: x
self.disks = []
def _get_disks(self):
@@ -82,7 +77,7 @@ class Service(SocketService):
if not raw[i*5+1] in self.disks:
continue
try:
- val = int(raw[i*5+3])
+ val = self.calc(int(raw[i*5+3]))
except ValueError:
val = 0
data[raw[i*5+1].replace("/dev/", "")] = val
@@ -105,9 +100,21 @@ class Service(SocketService):
if data is None:
return False
+ self.definitions = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]}
+ }
+ try:
+ self.choice = next(self.choice)
+ except StopIteration:
+ self.definitions[ORDER[0]]['options'].insert(2, 'Celsius')
+ else:
+ self.calc = self.choice[1]
+ self.definitions[ORDER[0]]['options'].insert(2, self.choice[0])
+
for name in data:
self.definitions[ORDER[0]]['lines'].append([name])
-
return True
-
-
diff --git a/python.d/ipfs.chart.py b/python.d/ipfs.chart.py
index b0b2a965..eaea3c5e 100644
--- a/python.d/ipfs.chart.py
+++ b/python.d/ipfs.chart.py
@@ -19,7 +19,7 @@ retries = 60
# }}
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers']
+ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
CHARTS = {
'bandwidth': {
@@ -32,9 +32,25 @@ CHARTS = {
'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
'lines': [
["peers", None, 'absolute']
- ]}
+ ]},
+ 'repo_size': {
+ 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+ 'lines': [
+ ["avail", None, "absolute", 1, 1e9],
+ ["size", None, "absolute", 1, 1e9],
+ ]},
+ 'repo_objects': {
+ 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+ 'lines': [
+ ["objects", None, "absolute", 1, 1],
+ ["pinned", None, "absolute", 1, 1],
+ ["recursive_pins", None, "absolute", 1, 1]
+ ]},
}
+SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12,
+ 'p': 15, 'e': 18, 'z': 21, 'y': 24}
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -45,63 +61,63 @@ class Service(UrlService):
self.baseurl = "http://localhost:5001"
self.order = ORDER
self.definitions = CHARTS
+ self.__storagemax = None
- def _get_bandwidth(self):
+ def _get_json(self, suburl):
"""
- Format data received from http request
- :return: int, int
+ :return: json decoding of the specified url
"""
- self.url = self.baseurl + "/api/v0/stats/bw"
- try:
- raw = self._get_raw_data()
- except AttributeError:
- return None
-
+ self.url = self.baseurl + suburl
try:
- parsed = json.loads(raw)
- bw_in = int(parsed['RateIn'])
- bw_out = int(parsed['RateOut'])
+ return json.loads(self._get_raw_data())
except:
- return None
+ return {}
- return bw_in, bw_out
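+ # /api/v0/pin/ls returns {'Keys': {<hash>: {'Type': ...}, ...}};
+ # count only the entries that are pinned recursively.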
+ def _recursive_pins(self, keys):
+ return len([k for k in keys if keys[k]["Type"] == b"recursive"])
- def _get_peers(self):
- """
- Format data received from http request
- :return: int
- """
- self.url = self.baseurl + "/api/v0/swarm/peers"
- try:
- raw = self._get_raw_data()
- except AttributeError:
- return None
-
- try:
- parsed = json.loads(raw)
- peers = len(parsed['Strings'])
- except:
- return None
+ def _dehumanize(self, storemax):
+ # convert from '10Gb' to 10000000000
+ if not isinstance(storemax, int):
+ storemax = storemax.lower()
+ if storemax.endswith('b'):
+ val, units = storemax[:-2], storemax[-2]
+ if units in SI_zeroes:
+ val += '0'*SI_zeroes[units]
+ storemax = val
+ try:
+ storemax = int(storemax)
+ except:
+ storemax = None
+ return storemax
- return peers
+ def _storagemax(self, storecfg):
+ if self.__storagemax is None:
+ self.__storagemax = self._dehumanize(storecfg['StorageMax'])
+ return self.__storagemax
def _get_data(self):
"""
Get data from API
:return: dict
"""
- try:
- peers = self._get_peers()
- bandwidth_in, bandwidth_out = self._get_bandwidth()
- except:
- return None
- data = {}
- if peers is not None:
- data['peers'] = peers
- if bandwidth_in is not None and bandwidth_out is not None:
- data['in'] = bandwidth_in
- data['out'] = bandwidth_out
+ cfg = { # suburl: list of (result-key, original-key, transform-func)
+ '/api/v0/stats/bw': [('in', 'RateIn', int),
+ ('out', 'RateOut', int)],
+ '/api/v0/swarm/peers': [('peers', 'Strings', len)],
+ '/api/v0/stats/repo': [('size', 'RepoSize', int),
+ ('objects', 'NumObjects', int)],
+ '/api/v0/pin/ls': [('pinned', 'Keys', len),
+ ('recursive_pins', 'Keys', self._recursive_pins)],
+ '/api/v0/config/show': [('avail', 'Datastore', self._storagemax)]
+ }
+ r = {}
+ for suburl in cfg:
+ in_json = self._get_json(suburl)
+ for newkey, origkey, xmute in cfg[suburl]:
+ try:
+ r[newkey] = xmute(in_json[origkey])
+ except Exception:
+ pass
+ return r or None
+
- if len(data) == 0:
- return None
- return data
diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py
new file mode 100644
index 00000000..54fbfb1f
--- /dev/null
+++ b/python.d/isc_dhcpd.chart.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+# Description: isc dhcpd lease netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from time import mktime, strptime, gmtime, time
+from os import stat
+try:
+ from ipaddress import IPv4Address as ipaddress
+ from ipaddress import ip_network
+ have_ipaddress = True
+except ImportError:
+ have_ipaddress = False
+try:
+ from itertools import filterfalse as filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+
+
+priority = 60000
+retries = 60
+update_every = 60
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.leases_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
+ self.pools = self.configuration.get('pools')
+
+ # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
+ # TODO: update algorithm to parse correctly 'local' db-time-format
+ # (epoch <seconds-since-epoch>; # <day-name> <month-name> <day-number> <hours>:<minutes>:<seconds> <year>)
+ # Also only ipv4 supported
+
+ def check(self):
+ if not self._get_raw_data():
+ self.error('Make sure leases_path is correct and leases log file is readable by netdata')
+ return False
+ elif not have_ipaddress:
+ self.error('No ipaddress module. Please install (py2-ipaddress in case of python2)')
+ return False
+ else:
+ try:
+ self.pools = self.pools.split()
+ if not [ip_network(return_utf(pool)) for pool in self.pools]:
+ self.error('Pools list is empty')
+ return False
+ except (ValueError, IndexError, AttributeError, SyntaxError) as e:
+ self.error('Pools configuration is incorrect', str(e))
+ return False
+
+ # Creating static charts
+ self.order = ['parse_time', 'leases_size', 'utilization', 'total']
+ self.definitions = {'utilization':
+ {'options':
+ [None, 'Pools utilization', 'used %', 'Utilization', 'isc_dhcpd.util', 'line'],
+ 'lines': []},
+ 'total':
+ {'options':
+ [None, 'Total all pools', 'leases', 'Utilization', 'isc_dhcpd.total', 'line'],
+ 'lines': [['total', 'leases', 'absolute']]},
+ 'parse_time':
+ {'options':
+ [None, 'Parse time', 'ms', 'Parse stats', 'isc_dhcpd.parse', 'line'],
+ 'lines': [['ptime', 'time', 'absolute']]},
+ 'leases_size':
+ {'options':
+ [None, 'dhcpd.leases file size', 'kilobytes', 'Parse stats', 'isc_dhcpd.lsize', 'line'],
+ 'lines': [['lsize', 'size', 'absolute']]}}
+ # Creating dynamic charts
+ for pool in self.pools:
+ self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
+ self.order.append(''.join(['leases_', pool]))
+ self.definitions[''.join(['leases_', pool])] = \
+ {'options': [None, 'Active leases', 'leases', 'Pools', 'isc_dhcpd.lease', 'area'],
+ 'lines': [[''.join(['le_', pool]), pool, 'absolute']]}
+
+ self.info('Plugin was started successfully')
+ return True
+
+ def _get_raw_data(self):
+ """
+ Parses log file
+ :return: tuple(
+ [ipaddress, lease end time, ...],
+ time to parse leases file
+ )
+ """
+ try:
+ with open(self.leases_path, 'rt') as dhcp_leases:
+ time_start = time()
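+ # part1 and part2 are lazy filters over the SAME file iterator,
+ # so zip() alternates between them: each 'lease ...' line gets
+ # paired with the next 'ends ...' line that follows it.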
+ part1 = filterfalse(find_lease, dhcp_leases)
+ part2 = filterfalse(find_ends, dhcp_leases)
+ raw_result = dict(zip(part1, part2))
+ time_end = time()
+
+ file_parse_time = round((time_end - time_start) * 1000)
+
+ except Exception as e:
+ self.error("Failed to parse leases file:", str(e))
+ return None
+
+ else:
+ result = (raw_result, file_parse_time)
+ return result
+
+ def _get_data(self):
+ """
+ :return: dict
+ """
+ raw_leases = self._get_raw_data()
+
+ if not raw_leases:
+ return None
+
+ # Result: {ipaddress: end lease time, ...}
+ all_leases = {k[6:len(k)-3]: v[7:len(v)-2] for k, v in raw_leases[0].items()}
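+ # The slices strip the fixed prefixes/suffixes, e.g. (illustrative):
+ # 'lease 192.168.1.10 {\n' -> '192.168.1.10'
+ # ' ends 3 2017/01/01 00:00:00;\n' -> '3 2017/01/01 00:00:00'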
+
+ # Result: [active binding, active binding....]. (Expire time (ends date;) - current time > 0)
+ active_leases = [k for k, v in all_leases.items() if is_binding_active(all_leases[k])]
+
+ # Result: {pool: number of active bindings in pool, ...}
+ pools_count = {pool: len([lease for lease in active_leases if is_address_in(lease, pool)])
+ for pool in self.pools}
+
+ # Result: {pool: number of host ip addresses in pool, ...}
+ pools_max = {pool: (2 ** (32 - int(pool.split('/')[1])) - 2)
+ for pool in self.pools}
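+ # e.g. a /24 pool gives 2 ** (32 - 24) - 2 = 254 usable host addresses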
+
+ # Result: {pool: % utilization, ....} (percent)
+ pools_util = {pool: int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0))
+ for pool in self.pools}
+
+ # Building dicts to send to netdata
+ final_count = {''.join(['le_', k]): v for k, v in pools_count.items()}
+ final_util = {''.join(['ut_', k]): v for k, v in pools_util.items()}
+
+ to_netdata = {'total': len(active_leases)}
+ to_netdata.update({'lsize': int(stat(self.leases_path)[6] / 1024)})
+ to_netdata.update({'ptime': int(raw_leases[1])})
+ to_netdata.update(final_util)
+ to_netdata.update(final_count)
+
+ return to_netdata
+
+
+def is_binding_active(binding):
+ return mktime(strptime(binding, '%w %Y/%m/%d %H:%M:%S')) - mktime(gmtime()) > 0
+
+
+def is_address_in(address, pool):
+ return ipaddress(return_utf(address)) in ip_network(return_utf(pool))
+
+
+def find_lease(value):
+ return value[0:3] != 'lea'
+
+
+def find_ends(value):
+ return value[2:6] != 'ends'
+
+
+def return_utf(s):
+ # python2 returns "<type 'str'>" for simple strings
+ # python3 returns "<class 'str'>" for unicode strings
+ if str(type(s)) == "<type 'str'>":
+ return unicode(s, 'utf-8')
+ return s
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
new file mode 100644
index 00000000..0f7d2b44
--- /dev/null
+++ b/python.d/mdstat.chart.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Description: mdstat netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from re import compile
+priority = 60000
+retries = 60
+update_every = 1
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ['agr_health']
+ self.definitions = {'agr_health':
+ {'options':
+ [None, 'Faulty devices in MD', 'failed disks', 'health', 'md.health', 'line'],
+ 'lines': []}}
+ self.proc_mdstat = '/proc/mdstat'
+ self.regex_disks = compile(r'((?<=\ )[a-zA-Z_0-9]+(?= : active)).*?((?<= \[)[0-9]+)/([0-9]+(?=\] ))')
+ self.regex_status = compile(r'([a-zA-Z_0-9]+)( : active)[^:]*?([a-z]+) = ([0-9.]+(?=%)).*?((?<=finish=)[0-9.]+)min speed=([0-9]+)')
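+ # Both regexes run against /proc/mdstat flattened to a single line;
+ # an illustrative fragment: 'md0 : active raid1 sdb1[1] sda1[0] ...
+ # [2/2] ... recovery = 13.2% (...) finish=5.2min speed=102400K/sec'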
+
+ def check(self):
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ self.error("Can't read mdstat data from %s" % (self.proc_mdstat))
+ return False
+
+ md_list = [md[0] for md in self.regex_disks.findall(raw_data)]
+
+ if not md_list:
+ self.error('No active arrays in %s' % (self.proc_mdstat))
+ return False
+ else:
+ for md in md_list:
+ self.order.append(md)
+ self.order.append(''.join([md, '_status']))
+ self.order.append(''.join([md, '_rate']))
+ self.definitions['agr_health']['lines'].append([''.join([md, '_health']), md, 'absolute'])
+ self.definitions[md] = {'options':
+ [None, '%s disks stats' % md, 'disks', md, 'md.disks', 'stacked'],
+ 'lines': [[''.join([md, '_total']), 'total', 'absolute'],
+ [''.join([md, '_inuse']), 'inuse', 'absolute']]}
+ self.definitions[''.join([md, '_status'])] = {'options':
+ [None, '%s current status' % md, 'percent', md, 'md.status', 'line'],
+ 'lines': [[''.join([md, '_resync']), 'resync', 'absolute', 1, 100],
+ [''.join([md, '_recovery']), 'recovery', 'absolute', 1, 100],
+ [''.join([md, '_reshape']), 'reshape', 'absolute', 1, 100],
+ [''.join([md, '_check']), 'check', 'absolute', 1, 100]]}
+ self.definitions[''.join([md, '_rate'])] = {'options':
+ [None, '%s operation status' % md, 'rate', md, 'md.rate', 'line'],
+ 'lines': [[''.join([md, '_finishin']), 'finish min', 'absolute', 1, 100],
+ [''.join([md, '_rate']), 'megabyte/s', 'absolute', -1, 100]]}
+ self.info('Plugin was started successfully. MDs to monitor: %s' % (md_list))
+
+ return True
+
+ def _get_raw_data(self):
+ """
+ Read data from /proc/mdstat
+ :return: str
+ """
+ try:
+ with open(self.proc_mdstat, 'rt') as proc_mdstat:
+ raw_result = proc_mdstat.read()
+ except Exception:
+ return None
+ else:
+ raw_result = ' '.join(raw_result.split())
+ return raw_result
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+ raw_mdstat = self._get_raw_data()
+ mdstat_disks = self.regex_disks.findall(raw_mdstat)
+ mdstat_status = self.regex_status.findall(raw_mdstat)
+ to_netdata = {}
+
+ for md in mdstat_disks:
+ to_netdata[''.join([md[0], '_total'])] = int(md[1])
+ to_netdata[''.join([md[0], '_inuse'])] = int(md[2])
+ to_netdata[''.join([md[0], '_health'])] = int(md[1]) - int(md[2])
+ to_netdata[''.join([md[0], '_check'])] = 0
+ to_netdata[''.join([md[0], '_resync'])] = 0
+ to_netdata[''.join([md[0], '_reshape'])] = 0
+ to_netdata[''.join([md[0], '_recovery'])] = 0
+ to_netdata[''.join([md[0], '_finishin'])] = 0
+ to_netdata[''.join([md[0], '_rate'])] = 0
+
+ for md in mdstat_status:
+ to_netdata[''.join([md[0], '_' + md[2]])] = round(float(md[3]) * 100)
+ to_netdata[''.join([md[0], '_finishin'])] = round(float(md[4]) * 100)
+ to_netdata[''.join([md[0], '_rate'])] = round(float(md[5]) / 1000 * 100)
+
+ return to_netdata
diff --git a/python.d/memcached.chart.py b/python.d/memcached.chart.py
index 5a1400c9..0d6807ba 100644
--- a/python.d/memcached.chart.py
+++ b/python.d/memcached.chart.py
@@ -20,89 +20,89 @@ retries = 60
# }}
ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
- 'get', 'get_rate', 'set_rate', 'delete', 'cas', 'increment', 'decrement', 'touch', 'touch_rate']
+ 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
CHARTS = {
'cache': {
- 'options': [None, 'Cache Size', 'megabytes', 'Cache', 'memcached.cache', 'stacked'],
+ 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
'lines': [
- ['used', 'used', 'absolute', 1, 1048576],
- ['avail', 'available', 'absolute', 1, 1048576]
+ ['avail', 'available', 'absolute', 1, 1048576],
+ ['used', 'used', 'absolute', 1, 1048576]
]},
'net': {
- 'options': [None, 'Network', 'kilobytes/s', 'Network', 'memcached.net', 'line'],
+ 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [
- ['bytes_read', 'read', 'incremental', 1, 1024],
- ['bytes_written', 'written', 'incremental', 1, 1024]
+ ['bytes_read', 'in', 'incremental', 8, 1024],
+ ['bytes_written', 'out', 'incremental', -8, 1024]
]},
'connections': {
- 'options': [None, 'Connections', 'connections/s', 'Cluster', 'memcached.connections', 'line'],
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
'lines': [
['curr_connections', 'current', 'incremental'],
['rejected_connections', 'rejected', 'incremental'],
['total_connections', 'total', 'incremental']
]},
'items': {
- 'options': [None, 'Items', 'items', 'Cluster', 'memcached.items', 'line'],
+ 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
'lines': [
['curr_items', 'current', 'absolute'],
['total_items', 'total', 'absolute']
]},
'evicted_reclaimed': {
- 'options': [None, 'Items', 'items', 'Evicted and Reclaimed', 'memcached.evicted_reclaimed', 'line'],
+ 'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
'lines': [
- ['evictions', 'evicted', 'absolute'],
- ['reclaimed', 'reclaimed', 'absolute']
+ ['reclaimed', 'reclaimed', 'absolute'],
+ ['evictions', 'evicted', 'absolute']
]},
'get': {
- 'options': [None, 'Requests', 'requests', 'GET', 'memcached.get', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
'lines': [
['get_hits', 'hits', 'percent-of-absolute-row'],
['get_misses', 'misses', 'percent-of-absolute-row']
]},
'get_rate': {
- 'options': [None, 'Rate', 'requests/s', 'GET', 'memcached.get_rate', 'line'],
+ 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
'lines': [
['cmd_get', 'rate', 'incremental']
]},
'set_rate': {
- 'options': [None, 'Rate', 'requests/s', 'SET', 'memcached.set_rate', 'line'],
+ 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
'lines': [
['cmd_set', 'rate', 'incremental']
]},
'delete': {
- 'options': [None, 'Requests', 'requests', 'DELETE', 'memcached.delete', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
'lines': [
['delete_hits', 'hits', 'percent-of-absolute-row'],
['delete_misses', 'misses', 'percent-of-absolute-row'],
]},
'cas': {
- 'options': [None, 'Requests', 'requests', 'CAS', 'memcached.cas', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
'lines': [
['cas_hits', 'hits', 'percent-of-absolute-row'],
['cas_misses', 'misses', 'percent-of-absolute-row'],
['cas_badval', 'bad value', 'percent-of-absolute-row']
]},
'increment': {
- 'options': [None, 'Requests', 'requests', 'Increment', 'memcached.increment', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
'lines': [
['incr_hits', 'hits', 'percent-of-absolute-row'],
['incr_misses', 'misses', 'percent-of-absolute-row']
]},
'decrement': {
- 'options': [None, 'Requests', 'requests', 'Decrement', 'memcached.decrement', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
'lines': [
['decr_hits', 'hits', 'percent-of-absolute-row'],
['decr_misses', 'misses', 'percent-of-absolute-row']
]},
'touch': {
- 'options': [None, 'Requests', 'requests', 'Touch', 'memcached.touch', 'stacked'],
+ 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
'lines': [
['touch_hits', 'hits', 'percent-of-absolute-row'],
['touch_misses', 'misses', 'percent-of-absolute-row']
]},
'touch_rate': {
- 'options': [None, 'Rate', 'requests/s', 'Touch', 'memcached.touch_rate', 'line'],
+ 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
'lines': [
['cmd_touch', 'rate', 'incremental']
]}
@@ -125,44 +125,52 @@ class Service(SocketService):
Get data from socket
:return: dict
"""
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
+ if response.startswith('ERROR'):
+ self.error("received ERROR")
+ return None
+
try:
- raw = self._get_raw_data().split("\n")
+ parsed = response.split("\n")
except AttributeError:
- self.error("no data received")
- return None
- if raw[0].startswith('ERROR'):
- self.error("Memcached returned ERROR")
+ self.error("response is invalid/empty")
return None
+
+ # split the response
data = {}
- for line in raw:
+ for line in parsed:
if line.startswith('STAT'):
try:
t = line[5:].split(' ')
- data[t[0]] = int(t[1])
+ data[t[0]] = t[1]
except (IndexError, ValueError):
+ self.debug("invalid line received: " + str(line))
pass
- try:
- data['hit_rate'] = int((data['keyspace_hits'] / float(data['keyspace_hits'] + data['keyspace_misses'])) * 100)
- except:
- data['hit_rate'] = 0
+ if len(data) == 0:
+ self.error("received data doesn't have any records")
+ return None
+
+ # custom calculations
try:
data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
- data['used'] = data['bytes']
+ data['used'] = int(data['bytes'])
except:
pass
- if len(data) == 0:
- self.error("received data doesn't have needed records")
- return None
- else:
- return data
+ return data
def _check_raw_data(self, data):
if data.endswith('END\r\n'):
+ self.debug("received full response from memcached")
return True
- else:
- return False
+
+ self.debug("waiting more data from memcached")
+ return False
def check(self):
"""
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
index dab6fad3..0e3a0329 100644
--- a/python.d/mysql.chart.py
+++ b/python.d/mysql.chart.py
@@ -40,6 +40,7 @@ retries = 60
# query executed on MySQL server
QUERY = "SHOW GLOBAL STATUS;"
+QUERY_SLAVE = "SHOW SLAVE STATUS;"
ORDER = ['net',
'queries',
@@ -55,7 +56,7 @@ ORDER = ['net',
'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', 'innodb_buffer_pool_ops',
'qcache_ops', 'qcache', 'qcache_freemem', 'qcache_memblocks',
'key_blocks', 'key_requests', 'key_disk_ops',
- 'files', 'files_rate']
+ 'files', 'files_rate', 'slave_behind', 'slave_status']
CHARTS = {
'net': {
@@ -298,8 +299,18 @@ CHARTS = {
["Connection_errors_peer_address", "peer_addr", "incremental"],
["Connection_errors_select", "select", "incremental"],
["Connection_errors_tcpwrap", "tcpwrap", "incremental"]
+ ]},
+ 'slave_behind': {
+ 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ["slave_behind", "seconds", "absolute"]
+ ]},
+ 'slave_status': {
+ 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ["slave_sql", "sql_running", "absolute"],
+ ["slave_io", "io_running", "absolute"]
]}
-
}
@@ -310,6 +321,7 @@ class Service(SimpleService):
self.order = ORDER
self.definitions = CHARTS
self.connection = None
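+        # slave status probing: -1 = not tried yet, 1 = collect it, 0 = disabled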
+ self.do_slave = -1
def _parse_config(self, configuration):
"""
@@ -348,6 +360,67 @@ class Service(SimpleService):
self.error("problem connecting to server:", e)
raise RuntimeError
+ def _get_data_slave(self):
+ """
+ Get slave raw data from MySQL server
+ :return: dict
+ """
+ if self.connection is None:
+ try:
+ self._connect()
+ except RuntimeError:
+ return None
+
+ slave_data = None
+ slave_raw_data = None
+ try:
+ cursor = self.connection.cursor()
+ if cursor.execute(QUERY_SLAVE):
+ slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
+
+ except MySQLdb.OperationalError as e:
+ self.debug("Reconnecting for query", QUERY_SLAVE, ":", str(e))
+ try:
+ self._connect()
+ cursor = self.connection.cursor()
+ if cursor.execute(QUERY_SLAVE):
+ slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
+ except Exception as e:
+ self.error("retried, but cannot execute query", QUERY_SLAVE, ":", str(e))
+ self.connection.close()
+ self.connection = None
+ return None
+
+ except Exception as e:
+ self.error("cannot execute query", QUERY_SLAVE, ":", str(e))
+ self.connection.close()
+ self.connection = None
+ return None
+
+ if slave_raw_data is not None:
+ slave_data = {
+ 'slave_behind': None,
+ 'slave_sql': None,
+ 'slave_io': None
+ }
+
+ try:
+ slave_data['slave_behind'] = int(slave_raw_data.setdefault('Seconds_Behind_Master', -1))
+ except:
+ slave_data['slave_behind'] = None
+
+ try:
+ slave_data['slave_sql'] = 1 if slave_raw_data.get('Slave_SQL_Running') == 'Yes' else -1
+ except:
+ slave_data['slave_sql'] = None
+
+ try:
+ slave_data['slave_io'] = 1 if slave_raw_data.get('Slave_IO_Running') == 'Yes' else -1
+ except:
+ slave_data['slave_io'] = None
+
+ return slave_data
+
def _get_data(self):
"""
Get raw data from MySQL server
@@ -362,23 +435,47 @@ class Service(SimpleService):
cursor = self.connection.cursor()
cursor.execute(QUERY)
raw_data = cursor.fetchall()
+
except MySQLdb.OperationalError as e:
- self.debug("Reconnecting due to", str(e))
- self._connect()
- cursor = self.connection.cursor()
- cursor.execute(QUERY)
- raw_data = cursor.fetchall()
+ self.debug("Reconnecting for query", QUERY, ":", str(e))
+ try:
+ self._connect()
+ cursor = self.connection.cursor()
+ cursor.execute(QUERY)
+ raw_data = cursor.fetchall()
+ except Exception as e:
+ self.error("retried, but cannot execute query", QUERY, ":", str(e))
+ self.connection.close()
+ self.connection = None
+ return None
+
except Exception as e:
- self.error("cannot execute query.", e)
+ self.error("cannot execute query", QUERY, ":", str(e))
self.connection.close()
self.connection = None
return None
data = dict(raw_data)
+
+ # check for slave data
+ # the first time is -1 (so we do it)
+ # then it is set to 1 or 0 and we keep it like that
+ if self.do_slave != 0:
+ slave_data = self._get_data_slave()
+ if slave_data is not None:
+ data.update(slave_data)
+ if self.do_slave == -1:
+ self.do_slave = 1
+ else:
+ if self.do_slave == -1:
+ self.error("replication metrics will be disabled - please allow netdata to collect them.")
+ self.do_slave = 0
+
+ # do calculations
try:
- data["Thread_cache_misses"] = int(data["Threads_created"] * 10000 / float(data["Connections"]))
+ data["Thread_cache_misses"] = round(float(data["Threads_created"]) / float(data["Connections"]) * 10000)
except:
- data["Thread_cache_misses"] = 0
+ data["Thread_cache_misses"] = None
return data
diff --git a/python.d/nginx.chart.py b/python.d/nginx.chart.py
index ff8bfede..88849a92 100644
--- a/python.d/nginx.chart.py
+++ b/python.d/nginx.chart.py
@@ -22,24 +22,24 @@ ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
CHARTS = {
'connections': {
- 'options': [None, 'nginx Active Connections', 'connections', 'nginx', 'nginx.connections', 'line'],
+ 'options': [None, 'nginx Active Connections', 'connections', 'active connections', 'nginx.connections', 'line'],
'lines': [
["active"]
]},
'requests': {
- 'options': [None, 'nginx Requests', 'requests/s', 'nginx', 'nginx.requests', 'line'],
+ 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [
["requests", None, 'incremental']
]},
'connection_status': {
- 'options': [None, 'nginx Active Connections by Status', 'connections', 'nginx', 'nginx.connection_status', 'line'],
+ 'options': [None, 'nginx Active Connections by Status', 'connections', 'status', 'nginx.connection_status', 'line'],
'lines': [
["reading"],
["writing"],
["waiting", "idle"]
]},
'connect_rate': {
- 'options': [None, 'nginx Connections Rate', 'connections/s', 'nginx', 'nginx.connect_rate', 'line'],
+ 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate', 'nginx.connect_rate', 'line'],
'lines': [
["accepts", "accepted", "incremental"],
["handled", None, "incremental"]
diff --git a/python.d/nginx_log.chart.py b/python.d/nginx_log.chart.py
index 95fb123d..ef964a56 100644
--- a/python.d/nginx_log.chart.py
+++ b/python.d/nginx_log.chart.py
@@ -15,9 +15,11 @@ CHARTS = {
'options': [None, 'nginx status codes', 'requests/s', 'requests', 'nginx_log.codes', 'stacked'],
'lines': [
["2xx", None, "incremental"],
+ ["5xx", None, "incremental"],
["3xx", None, "incremental"],
["4xx", None, "incremental"],
- ["5xx", None, "incremental"]
+ ["1xx", None, "incremental"],
+ ["other", None, "incremental"]
]}
}
@@ -33,37 +35,48 @@ class Service(LogService):
#pattern = r'(?:" )([0-9][0-9][0-9]) ?'
self.regex = re.compile(pattern)
+ self.data = {
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0
+ }
+
def _get_data(self):
"""
Parse new log lines
:return: dict
"""
- data = {'2xx': 0,
- '3xx': 0,
- '4xx': 0,
- '5xx': 0}
try:
raw = self._get_raw_data()
if raw is None:
return None
elif not raw:
- return data
+ return self.data
except (ValueError, AttributeError):
return None
regex = self.regex
for line in raw:
code = regex.search(line)
- beginning = code.group(1)[0]
+ try:
+ beginning = code.group(1)[0]
+ except AttributeError:
+ continue
if beginning == '2':
- data["2xx"] += 1
+ self.data["2xx"] += 1
elif beginning == '3':
- data["3xx"] += 1
+ self.data["3xx"] += 1
elif beginning == '4':
- data["4xx"] += 1
+ self.data["4xx"] += 1
elif beginning == '5':
- data["5xx"] += 1
-
- return data
+ self.data["5xx"] += 1
+ elif beginning == '1':
+ self.data["1xx"] += 1
+ else:
+ self.data["other"] += 1
+ return self.data
diff --git a/python.d/ovpn_status_log.chart.py b/python.d/ovpn_status_log.chart.py
new file mode 100644
index 00000000..c5fca002
--- /dev/null
+++ b/python.d/ovpn_status_log.chart.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Description: openvpn status log netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from re import compile, findall, search, subn
+priority = 60000
+retries = 60
+update_every = 10
+
+ORDER = ['users', 'traffic']
+CHARTS = {
+ 'users': {
+ 'options': [None, 'OpenVPN active users', 'active users', 'Users', 'openvpn_status.users', 'line'],
+ 'lines': [
+ ["users", None, "absolute"],
+ ]},
+ 'traffic': {
+ 'options': [None, 'OpenVPN traffic', 'kilobit/s', 'Traffic', 'openvpn_status.traffic', 'area'],
+ 'lines': [
+ ["in", None, "incremental", 8, 1000], ["out", None, "incremental", 8, -1000]
+ ]},
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.log_path = self.configuration.get('log_path')
+ self.regex_data_inter = compile(r'(?<=Since ).*?(?=.ROUTING)')
+ self.regex_data_final = compile(r'\d{1,3}(?:\.\d{1,3}){3}[:0-9,. ]*')
+ self.regex_users = compile(r'\d{1,3}(?:\.\d{1,3}){3}:\d+')
+ self.regex_traffic = compile(r'(?<=(?:,| ))\d+(?=(?:,| ))')
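+ # Parsing pipeline: regex_data_inter cuts the client list out of the
+ # status dump (between 'Since' and 'ROUTING'), regex_data_final keeps
+ # the per-client 'addr:port,bytes,...' fields, regex_users counts the
+ # 'addr:port' entries, and regex_traffic collects the byte counters,
+ # which alternate received/sent per client.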
+
+ def check(self):
+ if not self._get_raw_data():
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return False
+ else:
+ self.info('Plugin was started successfully')
+ return True
+
+ def _get_raw_data(self):
+ """
+ Open log file
+ :return: str
+ """
+ try:
+ with open(self.log_path, 'rt') as log:
+ result = log.read()
+ except Exception:
+ return None
+ else:
+ return result
+
+ def _get_data(self):
+ """
+ Parse openvpn-status log file.
+ The current regexes work for status-version 1, 2 and 3. Both users and bytes in/out are collected.
+ """
+
+ raw_data = self._get_raw_data()
+ try:
+ data_inter = self.regex_data_inter.search(' '.join(raw_data.splitlines())).group()
+ except AttributeError:
+ data_inter = ''
+
+ data_final = ' '.join(self.regex_data_final.findall(data_inter))
+ users = self.regex_users.subn('', data_final)[1]
+ traffic = self.regex_traffic.findall(data_final)
+
+ bytes_in = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 == 1])
+ bytes_out = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 == 0])
+
+ return {'users': users, 'in': bytes_in, 'out': bytes_out}
diff --git a/python.d/phpfpm.chart.py b/python.d/phpfpm.chart.py
index d4168cc1..b79a35d7 100755
--- a/python.d/phpfpm.chart.py
+++ b/python.d/phpfpm.chart.py
@@ -23,37 +23,37 @@ ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_
CHARTS = {
'connections': {
- 'options': [None, 'PHP-FPM Active Connections', 'connections', 'phpfpm', 'phpfpm.connections', 'line'],
+ 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections', 'line'],
'lines': [
["active"],
["maxActive", 'max active'],
["idle"]
]},
'requests': {
- 'options': [None, 'PHP-FPM Requests', 'requests/s', 'phpfpm', 'phpfpm.requests', 'line'],
+ 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
'lines': [
["requests", None, "incremental"]
]},
'performance': {
- 'options': [None, 'PHP-FPM Performance', 'status', 'phpfpm', 'phpfpm.performance', 'line'],
+ 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
'lines': [
["reached", 'max children reached'],
["slow", 'slow requests']
]},
'request_duration': {
- 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'phpfpm', 'phpfpm.request_duration', 'line'],
+ 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration', 'line'],
'lines': [
["maxReqDur", 'max request duration'],
["avgReqDur", 'average request duration']
]},
'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percent', 'phpfpm', 'phpfpm.request_cpu', 'line'],
+ 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
["maxReqCPU", 'max request cpu'],
["avgReqCPU", 'average request cpu']
]},
'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'phpfpm', 'phpfpm.request_mem', 'line'],
+ 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
["maxReqMem", 'max request memory'],
["avgReqMem", 'average request memory']
@@ -93,9 +93,14 @@ class Service(UrlService):
raw_json = json.loads(raw)
except ValueError:
return None
- data = {self.assignment[k]: v for k, v in raw_json.items() if k in self.assignment}
+ data = {}
+ for k, v in raw_json.items():
+ if k in self.assignment:
+ data[self.assignment[k]] = v
+
if '&full' in self.url or '?full' in self.url:
c = 0
+ sum_val = {}
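+ # accumulate per-metric sums here and divide once by the process
+ # count at the end (the old code re-divided the running value on
+ # every iteration, which skewed the averages)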
for proc in raw_json['processes']:
if proc['state'] != 'Idle':
continue
@@ -108,9 +113,14 @@ class Service(UrlService):
d = d/1024
if 'max' + v not in data or data['max' + v] < d:
data['max' + v] = d
- if 'avg' + v not in data:
+ if 'avg' + v not in sum_val:
+ sum_val['avg' + v] = 0
data['avg' + v] = 0
- data['avg' + v] = (data['avg' + v] + d) / c
+ sum_val['avg' + v] += d
+ if sum_val:
+ for k, v in sum_val.items():
+ data[k] = v/c
+
if len(data) == 0:
return None
return data
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
new file mode 100644
index 00000000..919b6f8e
--- /dev/null
+++ b/python.d/postgres.chart.py
@@ -0,0 +1,386 @@
+# -*- coding: utf-8 -*-
+# Description: postgres netdata python.d module
+# Authors: facetoe, dangtranhoang
+
+import re
+from copy import deepcopy
+
+import psycopg2
+from psycopg2 import extensions
+from psycopg2.extras import DictCursor
+
+from base import SimpleService
+
+# default module values
+update_every = 1
+priority = 90000
+retries = 60
+
+ARCHIVE = """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+"""
+
+BACKENDS = """
+SELECT
+ count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
+ (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
+FROM
+ pg_stat_activity;
+"""
+
+TABLE_STATS = """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS size_relations,
+ count(1) AS relations
+FROM pg_class
+WHERE relkind IN ('r', 't');
+"""
+
+INDEX_STATS = """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS size_indexes,
+ count(1) AS indexes
+FROM pg_class
+WHERE relkind = 'i';"""
+
+DATABASE = """
+SELECT
+ datname AS database_name,
+ sum(numbackends) AS connections,
+ sum(xact_commit) AS xact_commit,
+ sum(xact_rollback) AS xact_rollback,
+ sum(blks_read) AS blks_read,
+ sum(blks_hit) AS blks_hit,
+ sum(tup_returned) AS tup_returned,
+ sum(tup_fetched) AS tup_fetched,
+ sum(tup_inserted) AS tup_inserted,
+ sum(tup_updated) AS tup_updated,
+ sum(tup_deleted) AS tup_deleted,
+ sum(conflicts) AS conflicts
+FROM pg_stat_database
+WHERE NOT datname ~* '^template\d+'
+GROUP BY database_name;
+"""
+
+BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
+DATABASE_LOCKS = """
+SELECT
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS count
+FROM pg_locks
+ INNER JOIN pg_database ON pg_database.oid = pg_locks.database
+GROUP BY datname, mode
+ORDER BY datname, mode;
+"""
+REPLICATION = """
+SELECT
+ client_hostname,
+ client_addr,
+ state,
+ sent_offset - (
+ replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
+FROM (
+ SELECT
+ client_addr, client_hostname, state,
+ ('x' || lpad(split_part(sent_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
+ ('x' || lpad(split_part(replay_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
+ ('x' || lpad(split_part(sent_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
+ ('x' || lpad(split_part(replay_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
+ FROM pg_stat_replication
+) AS s;
+"""
+
+LOCK_TYPES = [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+]
+
+ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
+ 'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'background_writer']
+
+CHARTS = {
+ 'db_stat_transactions': {
+ 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions', 'line'],
+ 'lines': [
+ ['db_stat_xact_commit', 'committed', 'incremental'],
+ ['db_stat_xact_rollback', 'rolled back', 'incremental']
+ ]},
+ 'db_stat_connections': {
+ 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections', 'line'],
+ 'lines': [
+ ['db_stat_connections', 'connections', 'absolute']
+ ]},
+ 'db_stat_tuple_read': {
+ 'options': [None, 'Tuple reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_tuple_read', 'line'],
+ 'lines': [
+ ['db_stat_blks_read', 'disk', 'incremental'],
+ ['db_stat_blks_hit', 'cache', 'incremental']
+ ]},
+ 'db_stat_tuple_returned': {
+ 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned', 'line'],
+ 'lines': [
+ ['db_stat_tup_returned', 'sequential', 'incremental'],
+ ['db_stat_tup_fetched', 'bitmap', 'incremental']
+ ]},
+ 'db_stat_tuple_write': {
+ 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
+ 'lines': [
+ ['db_stat_tup_inserted', 'inserted', 'incremental'],
+ ['db_stat_tup_updated', 'updated', 'incremental'],
+ ['db_stat_tup_deleted', 'deleted', 'incremental'],
+ ['db_stat_conflicts', 'conflicts', 'incremental']
+ ]},
+ 'backend_process': {
+ 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process', 'line'],
+ 'lines': [
+ ['backend_process_active', 'active', 'absolute'],
+ ['backend_process_idle', 'idle', 'absolute']
+ ]},
+ 'index_count': {
+ 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
+ 'lines': [
+ ['index_count', 'total', 'absolute']
+ ]},
+ 'index_size': {
+ 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
+ 'lines': [
+ ['index_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]},
+ 'table_count': {
+ 'options': [None, 'Total tables', 'tables', 'tables', 'postgres.table_count', 'line'],
+ 'lines': [
+ ['table_count', 'total', 'absolute']
+ ]},
+ 'table_size': {
+ 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
+ 'lines': [
+ ['table_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]},
+ 'wal': {
+ 'options': [None, 'Write-Ahead Logging Statistics', 'files/s', 'write ahead log', 'postgres.wal', 'line'],
+ 'lines': [
+ ['wal_total', 'total', 'incremental'],
+ ['wal_ready', 'ready', 'incremental'],
+ ['wal_done', 'done', 'incremental']
+ ]},
+ 'background_writer': {
+ 'options': [None, 'Checkpoints', 'writes/s', 'background writer', 'postgres.background_writer', 'line'],
+ 'lines': [
+ ['background_writer_scheduled', 'scheduled', 'incremental'],
+ ['background_writer_requested', 'requested', 'incremental']
+ ]}
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.table_stats = configuration.pop('table_stats', True)
+ self.index_stats = configuration.pop('index_stats', True)
+ self.configuration = configuration
+ self.connection = None
+ self.is_superuser = False
+ self.data = {}
+ self.databases = set()
+
+ def _connect(self):
+ params = dict(user='postgres',
+ database=None,
+ password=None,
+ host='localhost',
+ port=5432)
+ params.update(self.configuration)
+
+ if not self.connection:
+ self.connection = psycopg2.connect(**params)
+ self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.connection.set_session(readonly=True)
+
+ def check(self):
+ try:
+ self._connect()
+ cursor = self.connection.cursor()
+ self._discover_databases(cursor)
+ self._check_if_superuser(cursor)
+ cursor.close()
+
+ self._create_definitions()
+ return True
+ except Exception as e:
+ self.error(str(e))
+ return False
+
+ def _discover_databases(self, cursor):
+ cursor.execute("""
+ SELECT datname
+ FROM pg_stat_database
+ WHERE NOT datname ~* '^template\d+'
+ """)
+ self.databases = set(r[0] for r in cursor)
+
+ def _check_if_superuser(self, cursor):
+ cursor.execute("""
+ SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+ """)
+ self.is_superuser = cursor.fetchone()[0]
+
+ def _create_definitions(self):
+ for database_name in self.databases:
+ for chart_template_name in list(CHARTS):
+ if chart_template_name.startswith('db_stat'):
+ self._add_database_stat_chart(chart_template_name, database_name)
+ self._add_database_lock_chart(database_name)
+
+ def _add_database_stat_chart(self, chart_template_name, database_name):
+ chart_template = CHARTS[chart_template_name]
+ chart_name = "{0}_{1}".format(database_name, chart_template_name)
+ if chart_name not in self.order:
+ self.order.insert(0, chart_name)
+ name, title, units, family, context, chart_type = chart_template['options']
+ self.definitions[chart_name] = {
+ 'options': [
+ name,
+ title + ': ' + database_name,
+ units,
+ 'db ' + database_name,
+ context,
+ chart_type
+ ]
+ }
+
+ self.definitions[chart_name]['lines'] = []
+ for line in deepcopy(chart_template['lines']):
+ line[0] = "{0}_{1}".format(database_name, line[0])
+ self.definitions[chart_name]['lines'].append(line)
+
+ def _add_database_lock_chart(self, database_name):
+ chart_name = "{0}_locks".format(database_name)
+ if chart_name not in self.order:
+ self.order.insert(-1, chart_name)
+ self.definitions[chart_name] = dict(
+ options=
+ [
+ None,
+ 'Locks on db: ' + database_name,
+ 'locks',
+ 'db ' + database_name,
+ 'postgres.db_locks',
+ 'line'
+ ],
+ lines=[]
+ )
+
+ for lock_type in LOCK_TYPES:
+ lock_id = "{0}_{1}".format(database_name, lock_type)
+ label = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", lock_type)
+ self.definitions[chart_name]['lines'].append([lock_id, label, 'absolute'])
+
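The re.sub() above turns the CamelCase lock names from pg_locks into readable chart labels by inserting a space at every lower-to-upper transition, e.g.:

    import re
    re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", "ShareUpdateExclusiveLock")
    # -> 'Share Update Exclusive Lock'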
+ def _get_data(self):
+ self._connect()
+
+ cursor = self.connection.cursor(cursor_factory=DictCursor)
+ self.add_stats(cursor)
+
+ cursor.close()
+ return self.data
+
+ def add_stats(self, cursor):
+ self.add_database_stats(cursor)
+ self.add_backend_stats(cursor)
+ if self.index_stats:
+ self.add_index_stats(cursor)
+ if self.table_stats:
+ self.add_table_stats(cursor)
+ self.add_lock_stats(cursor)
+ self.add_bgwriter_stats(cursor)
+
+ # self.add_replication_stats(cursor)
+
+ if self.is_superuser:
+ self.add_wal_stats(cursor)
+
+ def add_database_stats(self, cursor):
+ cursor.execute(DATABASE)
+ for row in cursor:
+ database_name = row.get('database_name')
+ self.data["{0}_{1}".format(database_name, 'db_stat_xact_commit')] = int(row.get('xact_commit', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_xact_rollback')] = int(row.get('xact_rollback', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_blks_read')] = int(row.get('blks_read', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_blks_hit')] = int(row.get('blks_hit', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_tup_returned')] = int(row.get('tup_returned', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_tup_fetched')] = int(row.get('tup_fetched', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_tup_inserted')] = int(row.get('tup_inserted', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_tup_updated')] = int(row.get('tup_updated', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_tup_deleted')] = int(row.get('tup_deleted', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_conflicts')] = int(row.get('conflicts', 0))
+ self.data["{0}_{1}".format(database_name, 'db_stat_connections')] = int(row.get('connections', 0))
+
+ def add_backend_stats(self, cursor):
+ cursor.execute(BACKENDS)
+ temp = cursor.fetchone()
+
+ self.data['backend_process_active'] = int(temp.get('backends_active', 0))
+ self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))
+
+ def add_index_stats(self, cursor):
+ cursor.execute(INDEX_STATS)
+ temp = cursor.fetchone()
+ self.data['index_count'] = int(temp.get('indexes', 0))
+ self.data['index_size'] = int(temp.get('size_indexes', 0))
+
+ def add_table_stats(self, cursor):
+ cursor.execute(TABLE_STATS)
+ temp = cursor.fetchone()
+ self.data['table_count'] = int(temp.get('relations', 0))
+ self.data['table_size'] = int(temp.get('size_relations', 0))
+
+ def add_lock_stats(self, cursor):
+ cursor.execute(DATABASE_LOCKS)
+
+ # zero out all current lock values
+ for database_name in self.databases:
+ for lock_type in LOCK_TYPES:
+ self.data["{0}_{1}".format(database_name, lock_type)] = 0
+
+ # populate those that have current locks
+ for row in cursor:
+ database_name, lock_type, lock_count = row
+ self.data["{0}_{1}".format(database_name, lock_type)] = lock_count
+
+ def add_wal_stats(self, cursor):
+ cursor.execute(ARCHIVE)
+ temp = cursor.fetchone()
+ self.data['wal_total'] = int(temp.get('file_count', 0))
+ self.data['wal_ready'] = int(temp.get('ready_count', 0))
+ self.data['wal_done'] = int(temp.get('done_count', 0))
+
+ def add_bgwriter_stats(self, cursor):
+ cursor.execute(BGWRITER)
+ temp = cursor.fetchone()
+ self.data['background_writer_scheduled'] = temp.get('checkpoints_timed', 0)
+ self.data['background_writer_requested'] = temp.get('checkpoints_req', 0)  # pg_stat_bgwriter names this column checkpoints_req
+
+'''
+ def add_replication_stats(self, cursor):
+ cursor.execute(REPLICATION)
+ temp = cursor.fetchall()
+ for row in temp:
+ self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
+ 'byte_lag',
+ int(row.get('byte_lag', 0)))
+'''
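For reference, the collection pattern this class uses boils down to a read-only autocommit connection and DictCursor rows. A minimal standalone sketch, assuming a local PostgreSQL server and default credentials (not part of the module itself):

    import psycopg2
    from psycopg2 import extensions
    from psycopg2.extras import DictCursor

    conn = psycopg2.connect(user='postgres', host='localhost', port=5432)
    conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    conn.set_session(readonly=True)   # the module never writes to the server

    cursor = conn.cursor(cursor_factory=DictCursor)
    cursor.execute("SELECT count(*) AS backends FROM pg_stat_activity;")
    print(cursor.fetchone()['backends'])  # DictCursor rows allow key access
    cursor.close()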
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
index 1508e096..320c54ba 100644
--- a/python.d/python_modules/base.py
+++ b/python.d/python_modules/base.py
@@ -116,33 +116,21 @@ class SimpleService(threading.Thread):
Return value presents exit status of update()
:return: boolean
"""
- t_start = time.time()
- timetable = self.timetable
+ t_start = float(time.time())
chart_name = self.chart_name
- # check if it is time to execute job update() function
- if timetable['next'] > t_start:
- self.debug(chart_name, "will be run in", str(int((timetable['next'] - t_start) * 1000)), "ms")
- return True
- since_last = int((t_start - timetable['last']) * 1000000)
- self.debug(chart_name,
- "ready to run, after", str(int((t_start - timetable['last']) * 1000)),
- "ms (update_every:", str(timetable['freq'] * 1000),
- "ms, latency:", str(int((t_start - timetable['next']) * 1000)), "ms")
+ since_last = int((t_start - self.timetable['last']) * 1000000)
if self.__first_run:
since_last = 0
+
if not self.update(since_last):
self.error("update function failed.")
return False
- t_end = time.time()
- self.timetable['next'] = t_end - (t_end % timetable['freq']) + timetable['freq']
+
# draw performance graph
- run_time = str(int((t_end - t_start) * 1000))
- # noinspection SqlNoDataSourceInspection
+ run_time = int((time.time() - t_start) * 1000)
print("BEGIN netdata.plugin_pythond_%s %s\nSET run_time = %s\nEND\n" %
- (self.chart_name, str(since_last), run_time))
- # sys.stdout.write("BEGIN netdata.plugin_pythond_%s %s\nSET run_time = %s\nEND\n" %
- # (self.chart_name, str(since_last), run_time))
+ (self.chart_name, str(since_last), str(run_time)))
self.debug(chart_name, "updated in", str(run_time), "ms")
self.timetable['last'] = t_start
@@ -155,23 +143,48 @@ class SimpleService(threading.Thread):
Exits when job failed or timed out.
:return: None
"""
- self.timetable['last'] = time.time()
+ step = float(self.timetable['freq'])
+ penalty = 0
+ self.timetable['last'] = float(time.time() - step)
+ self.debug("starting data collection - update frequency:", str(step), " retries allowed:", str(self.retries))
while True: # run forever, unless something is wrong
+ now = float(time.time())
+ next = self.timetable['next'] = now - (now % step) + step + penalty
+
+ # it is important to do this in a loop:
+ # sleep() is interruptible, so a single call may return early
+ while now < next:
+ self.debug("sleeping for", str(next - now), "secs to reach frequency of", str(step), "secs, now:", str(now), " next:", str(next), " penalty:", str(penalty))
+ time.sleep(next - now)
+ now = float(time.time())
+
+ # do the job
try:
status = self._run_once()
except Exception as e:
- self.error("Something wrong: ", str(e))
- return
- if status: # handle retries if update failed
- time.sleep(self.timetable['next'] - time.time())
+ status = False
+
+ if status:
+ # it is good
self.retries_left = self.retries
+ penalty = 0
else:
+ # it failed
self.retries_left -= 1
if self.retries_left <= 0:
- self.error("no more retries. Exiting")
- return
+ if penalty == 0:
+ penalty = float(self.retries * step) / 2
+ else:
+ penalty *= 1.5
+
+ if penalty > 600:
+ penalty = 600
+
+ self.retries_left = self.retries
+ self.alert("failed to collect data for " + str(self.retries) + " times - increasing penalty to " + str(penalty) + " sec and trying again")
+
else:
- time.sleep(self.timetable['freq'])
+ self.error("failed to collect data - " + str(self.retries_left) + " retries left - penalty: " + str(penalty) + " sec")
# --- CHART ---
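The rewritten run() loop aligns wakeups to the update frequency (next = now - (now % step) + step) and, when a module exhausts its retries, pushes collection into the future by a growing penalty instead of exiting. A minimal sketch of the resulting penalty schedule, using the module defaults of step=1 and retries=60:

    def penalty_schedule(step=1.0, retries=60, cap=600.0):
        # first penalty is half of retries * step, then it grows by
        # a factor of 1.5, capped at 600 seconds, as in the loop above
        penalty = float(retries * step) / 2
        while True:
            yield min(penalty, cap)
            penalty *= 1.5

    schedule = penalty_schedule()
    print([next(schedule) for _ in range(5)])  # [30.0, 45.0, 67.5, 101.25, 151.875]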
@@ -307,7 +320,10 @@ class SimpleService(threading.Thread):
"""
Upload new data to netdata.
"""
- print(self._data_stream)
+ try:
+ print(self._data_stream)
+ except Exception as e:
+ msg.fatal('cannot send data to netdata:', str(e))
self._data_stream = ""
# --- ERROR HANDLING ---
@@ -318,6 +334,12 @@ class SimpleService(threading.Thread):
"""
msg.error(self.chart_name, *params)
+ def alert(self, *params):
+ """
+ Show error message on stderr
+ """
+ msg.alert(self.chart_name, *params)
+
def debug(self, *params):
"""
Show debug message on stderr
@@ -345,10 +367,18 @@ class SimpleService(threading.Thread):
:return: boolean
"""
self.debug("Module", str(self.__module__), "doesn't implement check() function. Using default.")
- if self._get_data() is None or len(self._get_data()) == 0:
+ data = self._get_data()
+
+ if data is None:
+ self.debug("failed to receive data during check().")
return False
- else:
- return True
+
+ if len(data) == 0:
+ self.debug("empty data during check().")
+ return False
+
+ self.debug("successfully received data during check(): '" + str(data) + "'")
+ return True
def create(self):
"""
@@ -357,6 +387,7 @@ class SimpleService(threading.Thread):
"""
data = self._get_data()
if data is None:
+ self.debug("failed to receive data during create().")
return False
idx = 0
@@ -380,7 +411,7 @@ class SimpleService(threading.Thread):
"""
data = self._get_data()
if data is None:
- self.debug("_get_data() returned no data")
+ self.debug("failed to receive data during update().")
return False
updated = False
@@ -458,7 +489,7 @@ class UrlService(SimpleService):
return None
try:
- raw = f.read().decode('utf-8')
+ raw = f.read().decode('utf-8', 'ignore')
except Exception as e:
self.error(str(e))
finally:
@@ -506,8 +537,83 @@ class SocketService(SimpleService):
self.unix_socket = None
self.request = ""
self.__socket_config = None
+ self.__empty_request = "".encode()
SimpleService.__init__(self, configuration=configuration, name=name)
+ def _socketerror(self, message=None):
+ if self.unix_socket is not None:
+ self.error("unix socket '" + self.unix_socket + "':", message)
+ else:
+ if self.__socket_config is not None:
+ af, socktype, proto, canonname, sa = self.__socket_config
+ self.error("socket to '" + str(sa[0]) + "' port " + str(sa[1]) + ":", message)
+ else:
+ self.error("unknown socket:", message)
+
+ def _connect2socket(self, res=None):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: boolean
+ """
+ if res is None:
+ res = self.__socket_config
+ if res is None:
+ self.error("Cannot create socket to 'None':")
+ return False
+
+ af, socktype, proto, canonname, sa = res
+ try:
+ self.debug("creating socket to '" + str(sa[0]) + "', port " + str(sa[1]))
+ self._sock = socket.socket(af, socktype, proto)
+ except socket.error as e:
+ self.error("Failed to create socket to '" + str(sa[0]) + "', port " + str(sa[1]) + ":", str(e))
+ self._sock = None
+ self.__socket_config = None
+ return False
+
+ try:
+ self.debug("connecting socket to '" + str(sa[0]) + "', port " + str(sa[1]))
+ self._sock.connect(sa)
+ except socket.error as e:
+ self.error("Failed to connect to '" + str(sa[0]) + "', port " + str(sa[1]) + ":", str(e))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ self.debug("connected to '" + str(sa[0]) + "', port " + str(sa[1]))
+ self.__socket_config = res
+ return True
+
+ def _connect2unixsocket(self):
+ """
+ Connect to a unix socket, given its filename
+ :return: boolean
+ """
+ if self.unix_socket is None:
+ self.error("cannot connect to unix socket 'None'")
+ return False
+
+ try:
+ self.debug("attempting DGRAM unix socket '" + str(self.unix_socket) + "'")
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._sock.connect(self.unix_socket)
+ self.debug("connected DGRAM unix socket '" + str(self.unix_socket) + "'")
+ return True
+ except socket.error as e:
+ self.debug("Failed to connect DGRAM unix socket '" + str(self.unix_socket) + "':", str(e))
+
+ try:
+ self.debug("attempting STREAM unix socket '" + str(self.unix_socket) + "'")
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.connect(self.unix_socket)
+ self.debug("connected STREAM unix socket '" + str(self.unix_socket) + "'")
+ return True
+ except socket.error as e:
+ self.debug("Failed to connect STREAM unix socket '" + str(self.unix_socket) + "':", str(e))
+ self.error("Failed to connect to unix socket '" + str(self.unix_socket) + "':", str(e))
+ self._sock = None
+ return False
+
def _connect(self):
"""
Recreate socket and connect to it since sockets cannot be reused after closing
@@ -515,63 +621,38 @@ class SocketService(SimpleService):
:return:
"""
try:
- if self.unix_socket is None:
- if self.__socket_config is None:
- # establish ipv6 or ipv4 connection.
- for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
- try:
- # noinspection SpellCheckingInspection
- af, socktype, proto, canonname, sa = res
- self._sock = socket.socket(af, socktype, proto)
- except socket.error as e:
- self.debug("Cannot create socket:", str(e))
- self._sock = None
- continue
- try:
- self._sock.connect(sa)
- except socket.error as e:
- self.debug("Cannot connect to socket:", str(e))
- self._disconnect()
- continue
- self.__socket_config = res
- break
- else:
- # connect to socket with previously established configuration
- try:
- af, socktype, proto, canonname, sa = self.__socket_config
- self._sock = socket.socket(af, socktype, proto)
- self._sock.connect(sa)
- except socket.error as e:
- self.debug("Cannot create or connect to socket:", str(e))
- self._disconnect()
+ if self.unix_socket is not None:
+ self._connect2unixsocket()
+
else:
- # connect to unix socket
- try:
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- self._sock.connect(self.unix_socket)
- except socket.error:
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self._sock.connect(self.unix_socket)
+ if self.__socket_config is not None:
+ self._connect2socket()
+ else:
+ for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+ if self._connect2socket(res):
+ break
except Exception as e:
- self.error(str(e),
- "Cannot create socket with following configuration: host:", str(self.host),
- "port:", str(self.port),
- "socket:", str(self.unix_socket))
self._sock = None
- self._sock.setblocking(0)
+ self.__socket_config = None
+
+ if self._sock is not None:
+ self._sock.setblocking(0)
+ self._sock.settimeout(5)
+ self.debug("set socket timeout to: " + str(self._sock.gettimeout()))
def _disconnect(self):
"""
Close socket connection
:return:
"""
- try:
- self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
- self._sock.close()
- except Exception:
- pass
- self._sock = None
+ if self._sock is not None:
+ try:
+ self.debug("closing socket")
+ self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ self._sock.close()
+ except Exception:
+ pass
+ self._sock = None
def _send(self):
"""
@@ -579,15 +660,13 @@ class SocketService(SimpleService):
:return: boolean
"""
# Send request if it is needed
- if self.request != "".encode():
+ if self.request != self.__empty_request:
try:
+ self.debug("sending request:", str(self.request))
self._sock.send(self.request)
except Exception as e:
+ self._socketerror("error sending request:" + str(e))
self._disconnect()
- self.error(str(e),
- "used configuration: host:", str(self.host),
- "port:", str(self.port),
- "socket:", str(self.unix_socket))
return False
return True
@@ -598,24 +677,28 @@ class SocketService(SimpleService):
"""
data = ""
while True:
+ self.debug("receiving response")
try:
- ready_to_read, _, in_error = select.select([self._sock], [], [], 5)
+ buf = self._sock.recv(4096)
except Exception as e:
- self.debug("SELECT", str(e))
+ self._socketerror("failed to receive response:" + str(e))
self._disconnect()
break
- if len(ready_to_read) > 0:
- buf = self._sock.recv(4096)
- if len(buf) == 0 or buf is None: # handle server disconnect
- break
- data += buf.decode()
- if self._check_raw_data(data):
- break
- else:
- self.error("Socket timed out.")
+
+ if buf is None or len(buf) == 0: # handle server disconnect
+ if data == "":
+ self._socketerror("unexpectedly disconnected")
+ else:
+ self.debug("server closed the connection")
self._disconnect()
break
+ self.debug("received data:", str(buf))
+ data += buf.decode('utf-8', 'ignore')
+ if self._check_raw_data(data):
+ break
+
+ self.debug("final response:", str(data))
return data
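The new _receive() drops the select() dance in favour of a plain blocking recv() governed by the 5-second timeout set in _connect(): read 4 KB chunks until the peer closes the connection or _check_raw_data() reports a complete response. In essence:

    def receive_all(sock, is_complete):
        # sketch of the loop above; is_complete plays the role of _check_raw_data()
        data = ""
        while True:
            buf = sock.recv(4096)      # raises socket.timeout after 5 seconds
            if not buf:                # empty read: server closed the connection
                break
            data += buf.decode('utf-8', 'ignore')
            if is_complete(data):
                break
        return data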
def _get_raw_data(self):
@@ -625,6 +708,8 @@ class SocketService(SimpleService):
"""
if self._sock is None:
self._connect()
+ if self._sock is None:
+ return None
# Send request if it is needed
if not self._send():
@@ -654,6 +739,7 @@ class SocketService(SimpleService):
self.name = ""
else:
self.name = str(self.name)
+
try:
self.unix_socket = str(self.configuration['socket'])
except (KeyError, TypeError):
@@ -667,10 +753,12 @@ class SocketService(SimpleService):
self.port = int(self.configuration['port'])
except (KeyError, TypeError):
self.debug("No port specified. Using: '" + str(self.port) + "'")
+
try:
self.request = str(self.configuration['request'])
except (KeyError, TypeError):
self.debug("No request specified. Using: '" + str(self.request) + "'")
+
self.request = self.request.encode()
def check(self):
@@ -724,7 +812,7 @@ class LogService(SimpleService):
try:
self.log_path = str(self.configuration['path'])
except (KeyError, TypeError):
- self.error("No path to log specified. Using: '" + self.log_path + "'")
+ self.info("No path to log specified. Using: '" + self.log_path + "'")
if os.access(self.log_path, os.R_OK):
return True
@@ -779,22 +867,25 @@ class ExecutableService(SimpleService):
try:
self.command = str(self.configuration['command'])
except (KeyError, TypeError):
- self.error("No command specified. Using: '" + self.command + "'")
- command = self.command.split(' ')
+ self.info("No command specified. Using: '" + self.command + "'")
+ # Splitting self.command on every space so subprocess.Popen reads it properly
+ self.command = self.command.split(' ')
- for arg in command[1:]:
+ for arg in self.command[1:]:
if any(st in arg for st in self.bad_substrings):
self.error("Bad command argument:" + " ".join(self.command[1:]))
return False
+
# test command and search for it in /usr/sbin or /sbin when failed
- base = command[0].split('/')[-1]
+ base = self.command[0].split('/')[-1]
if self._get_raw_data() is None:
for prefix in ['/sbin/', '/usr/sbin/']:
- command[0] = prefix + base
- if os.path.isfile(command[0]):
+ self.command[0] = prefix + base
+ if os.path.isfile(self.command[0]):
break
- self.command = command
+
if self._get_data() is None or len(self._get_data()) == 0:
self.error("Command", self.command, "returned no data")
return False
+
return True
diff --git a/python.d/python_modules/msg.py b/python.d/python_modules/msg.py
index ecefba0a..74716770 100644
--- a/python.d/python_modules/msg.py
+++ b/python.d/python_modules/msg.py
@@ -1,14 +1,17 @@
# -*- coding: utf-8 -*-
# Description: logging for netdata python.d modules
+import traceback
import sys
from time import time, strftime
DEBUG_FLAG = False
+TRACE_FLAG = False
PROGRAM = ""
-LOG_COUNTER = 2
-LOG_INTERVAL = 5
-NEXT_CHECK = 0
+LOG_COUNTER = 0
+LOG_THROTTLE = 10000  # must be large during init
+LOG_INTERVAL = 1      # must be short during init
+LOG_NEXT_CHECK = 0
WRITE = sys.stderr.write
FLUSH = sys.stderr.flush
@@ -20,23 +23,39 @@ def log_msg(msg_type, *args):
:param msg_type: str
"""
global LOG_COUNTER
- if not DEBUG_FLAG:
- LOG_COUNTER -= 1
+ global LOG_THROTTLE
+ global LOG_INTERVAL
+ global LOG_NEXT_CHECK
now = time()
- if LOG_COUNTER >= 0:
- timestamp = strftime('%y-%m-%d %X')
+
+ if not DEBUG_FLAG:
+ LOG_COUNTER += 1
+
+ # WRITE("COUNTER " + str(LOG_COUNTER) + " THROTTLE " + str(LOG_THROTTLE) + " INTERVAL " + str(LOG_INTERVAL) + " NOW " + str(now) + " NEXT " + str(LOG_NEXT_CHECK) + "\n")
+
+ if LOG_COUNTER <= LOG_THROTTLE or msg_type == "FATAL" or msg_type == "ALERT":
+ timestamp = strftime('%Y-%m-%d %X')
msg = "%s: %s %s: %s" % (timestamp, PROGRAM, str(msg_type), " ".join(args))
WRITE(msg + "\n")
FLUSH()
+ elif LOG_COUNTER == LOG_THROTTLE + 1:
+ timestamp = strftime('%Y-%m-%d %X')
+ msg = "%s: python.d.plugin: throttling further log messages for %s seconds" % (timestamp, str(int(LOG_NEXT_CHECK + 0.5) - int(now)))
+ WRITE(msg + "\n")
+ FLUSH()
- global NEXT_CHECK
- if NEXT_CHECK <= now:
- NEXT_CHECK = now - (now % LOG_INTERVAL) + LOG_INTERVAL
- if LOG_COUNTER < 0:
- timestamp = strftime('%y-%m-%d %X')
- msg = "%s: Prevented %s log messages from displaying" % (timestamp, str(0 - LOG_COUNTER))
+ if LOG_NEXT_CHECK <= now:
+ if LOG_COUNTER >= LOG_THROTTLE:
+ timestamp = strftime('%Y-%m-%d %X')
+ msg = "%s: python.d.plugin: Prevented %s log messages from displaying" % (timestamp, str(LOG_COUNTER - LOG_THROTTLE))
WRITE(msg + "\n")
FLUSH()
+ LOG_NEXT_CHECK = now - (now % LOG_INTERVAL) + LOG_INTERVAL
+ LOG_COUNTER = 0
+
+ if TRACE_FLAG:
+ if msg_type == "FATAL" or msg_type == "ERROR" or msg_type == "ALERT":
+ traceback.print_exc()
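The reworked throttling counts every message instead of silently dropping the overflow: up to LOG_THROTTLE messages pass per LOG_INTERVAL window, FATAL and ALERT always pass, a single notice announces that further messages are suppressed, and the counter resets at the next window boundary. A stripped-down sketch of the per-window decision (module globals elided):

    # sketch of the accounting above; counter/throttle map to
    # LOG_COUNTER and LOG_THROTTLE
    def should_emit(counter, throttle, msg_type):
        if msg_type in ("FATAL", "ALERT"):
            return True                 # never throttled
        return counter <= throttle      # within this window's budget

    # at each LOG_NEXT_CHECK boundary the caller resets:
    #   LOG_COUNTER = 0
    #   LOG_NEXT_CHECK = now - (now % LOG_INTERVAL) + LOG_INTERVAL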
def debug(*args):
@@ -56,6 +75,13 @@ def error(*args):
log_msg("ERROR", *args)
+def alert(*args):
+ """
+ Print message on stderr.
+ """
+ log_msg("ALERT", *args)
+
+
def info(*args):
"""
Print message on stderr.
@@ -67,8 +93,9 @@ def fatal(*args):
"""
Print message on stderr and exit.
"""
- log_msg("FATAL", *args)
- # using sys.stdout causes IOError: Broken Pipe
- print('DISABLE')
- # sys.stdout.write('DISABLE\n')
- sys.exit(1) \ No newline at end of file
+ try:
+ log_msg("FATAL", *args)
+ print('DISABLE')
+ except:
+ pass
+ sys.exit(1)
diff --git a/python.d/python_modules/pyyaml2/__init__.py b/python.d/python_modules/pyyaml2/__init__.py
deleted file mode 100644
index 76e19e13..00000000
--- a/python.d/python_modules/pyyaml2/__init__.py
+++ /dev/null
@@ -1,315 +0,0 @@
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
-__version__ = '3.11'
-
-try:
- from cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- from StringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __metaclass__ = YAMLObjectMetaclass
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
- from_yaml = classmethod(from_yaml)
-
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
- to_yaml = classmethod(to_yaml)
-
diff --git a/python.d/python_modules/pyyaml2/composer.py b/python.d/python_modules/pyyaml2/composer.py
deleted file mode 100644
index 06e5ac78..00000000
--- a/python.d/python_modules/pyyaml2/composer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from error import MarkedYAMLError
-from events import *
-from nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer(object):
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # If there are more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor.encode('utf-8'), event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
- raise ComposerError("found duplicate anchor %r; first occurence"
- % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
- "second occurence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
diff --git a/python.d/python_modules/pyyaml2/constructor.py b/python.d/python_modules/pyyaml2/constructor.py
deleted file mode 100644
index 635faac3..00000000
--- a/python.d/python_modules/pyyaml2/constructor.py
+++ /dev/null
@@ -1,675 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # If there are more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = generator.next()
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
- add_constructor = classmethod(add_constructor)
-
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
- add_multi_constructor = classmethod(add_multi_constructor)
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError), exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- ur'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node).encode('utf-8')
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_long(self, node):
- return long(self.construct_yaml_int(node))
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = '__builtin__'
- object_name = name
- try:
- __import__(module_name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r" % (object_name.encode('utf-8'),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- class classobj: pass
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
- setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
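A note on the block above: the tag:yaml.org,2002:python/* constructors registered here are what let the full Loader instantiate arbitrary Python objects from a document, so they must never see untrusted input. A minimal sketch of the difference, assuming a standard PyYAML installation rather than this vendored copy (the document string is an arbitrary example):

import yaml

# The full Loader resolves python/* tags; construct_python_object_apply
# imports the named module and calls the named function with the given args.
doc = u"!!python/object/apply:os.getcwd []"
print(yaml.load(doc, Loader=yaml.Loader))        # prints the current directory

# SafeLoader has no python/* constructors, so the tag falls through to
# construct_undefined and loading fails instead of executing code.
try:
    yaml.load(doc, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
    print(exc)

This is why yaml.safe_load() is the conventional entry point for configuration files.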
diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/python.d/python_modules/pyyaml2/cyaml.py
deleted file mode 100644
index 68dcd751..00000000
--- a/python.d/python_modules/pyyaml2/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
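The C-accelerated classes above exist only when PyYAML was compiled against libyaml (the _yaml extension module), so callers typically import them with a pure-Python fallback. A sketch of the usual pattern, assuming a standard PyYAML installation:

import yaml

try:
    # libyaml-backed classes; present only if the _yaml extension was built.
    from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
    # Pure-Python fallback with identical behaviour, just slower.
    from yaml import SafeLoader, SafeDumper

data = yaml.load(u"retries: 3", Loader=SafeLoader)
print(yaml.dump(data, Dumper=SafeDumper))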
diff --git a/python.d/python_modules/pyyaml2/dumper.py b/python.d/python_modules/pyyaml2/dumper.py
deleted file mode 100644
index f811d2c9..00000000
--- a/python.d/python_modules/pyyaml2/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
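As the constructors above show, every keyword a Dumper accepts is forwarded to the Emitter, Serializer, or Representer, and yaml.dump() passes its keyword arguments straight through to the chosen Dumper. A short illustration against a standard PyYAML installation (data and values chosen arbitrarily):

import yaml

data = {'plugin': 'python.d', 'update_every': 1, 'charts': ['cpu', 'mem']}
print(yaml.dump(data,
                Dumper=yaml.SafeDumper,    # SafeRepresenter: plain types only
                default_flow_style=False,  # block style for collections
                indent=4,                  # becomes Emitter.best_indent
                width=60,                  # becomes Emitter.best_width
                explicit_start=True))      # Serializer emits a leading '---'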
diff --git a/python.d/python_modules/pyyaml2/emitter.py b/python.d/python_modules/pyyaml2/emitter.py
deleted file mode 100644
index e5bcdccc..00000000
--- a/python.d/python_modules/pyyaml2/emitter.py
+++ /dev/null
@@ -1,1140 +0,0 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from error import YAMLError
-from events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter(object):
-
- DEFAULT_TAG_PREFIXES = {
- u'!' : u'!',
- u'tag:yaml.org,2002:' : u'!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
- # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for a few more events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = self.event.tags.keys()
- handles.sort()
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(u':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator(u'-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(u':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (handle.encode('utf-8')))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch.encode('utf-8'), handle.encode('utf-8')))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = self.tag_prefixes.keys()
- prefixes.sort()
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch.encode('utf-8'), anchor.encode('utf-8')))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear inside a scalar, either.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'#' and preceded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E'
- or (self.allow_unicode
- and (u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += unicode(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
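The grammar comment at the top of the deleted emitter.py is the whole contract: the Emitter consumes a well-formed event stream and nothing else. With a standard PyYAML installation, yaml.emit() feeds events to this state machine directly, which makes the grammar easy to exercise by hand:

import yaml
from yaml.events import (StreamStartEvent, DocumentStartEvent, ScalarEvent,
                         DocumentEndEvent, StreamEndEvent)

# stream   ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END  (the node is a single SCALAR)
events = [
    StreamStartEvent(),
    DocumentStartEvent(explicit=True),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value=u'hello'),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
]
print(yaml.emit(events))   # prints "--- hello"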
diff --git a/python.d/python_modules/pyyaml2/error.py b/python.d/python_modules/pyyaml2/error.py
deleted file mode 100644
index 577686db..00000000
--- a/python.d/python_modules/pyyaml2/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark(object):
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end].encode('utf-8')
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
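The Mark and MarkedYAMLError machinery above is what produces PyYAML's "line X, column Y" diagnostics with a caret snippet. With a standard PyYAML installation, the same attributes are reachable from any caught YAMLError, for example:

import yaml

try:
    yaml.safe_load(u"ports: [19999, 8125")   # unterminated flow sequence
except yaml.YAMLError as exc:
    print(exc)                                # full message with snippet and caret
    mark = getattr(exc, 'problem_mark', None)
    if mark is not None:
        print("error at line %d, column %d" % (mark.line + 1, mark.column + 1))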
diff --git a/python.d/python_modules/pyyaml2/events.py b/python.d/python_modules/pyyaml2/events.py
deleted file mode 100644
index f79ad389..00000000
--- a/python.d/python_modules/pyyaml2/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
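These event classes are the currency between Parser and Emitter. With a standard PyYAML installation, yaml.parse() exposes the stream directly, which is a quick way to see which of the classes above a given document produces:

import yaml

for event in yaml.parse(u"- a\n- {b: 1}\n"):
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent,
# MappingStartEvent, ScalarEvent, ScalarEvent, MappingEndEvent,
# SequenceEndEvent, DocumentEndEvent, StreamEndEvent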
diff --git a/python.d/python_modules/pyyaml2/loader.py b/python.d/python_modules/pyyaml2/loader.py
deleted file mode 100644
index 293ff467..00000000
--- a/python.d/python_modules/pyyaml2/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
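The three loaders above differ only in which constructor and resolver they mix in, and the practical effect is easy to demonstrate: BaseConstructor/BaseResolver perform no tag resolution, so every scalar stays a string, while SafeConstructor/Resolver apply the standard YAML 1.1 types. A sketch against a standard PyYAML installation:

import yaml

doc = u"port: 19999\nenabled: yes\n"
print(yaml.load(doc, Loader=yaml.BaseLoader))
# {'port': '19999', 'enabled': 'yes'}   everything is left as strings
print(yaml.load(doc, Loader=yaml.SafeLoader))
# {'port': 19999, 'enabled': True}      ints and YAML 1.1 booleans resolved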
diff --git a/python.d/python_modules/pyyaml2/nodes.py b/python.d/python_modules/pyyaml2/nodes.py
deleted file mode 100644
index c4f070c4..00000000
--- a/python.d/python_modules/pyyaml2/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
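These node classes form the representation graph that sits between parsing and construction. With a standard PyYAML installation, yaml.compose() returns the root node of that graph, so the tag/value layout above is easy to inspect:

import yaml

node = yaml.compose(u"jobs: [local, remote]")
print(node.tag)                         # tag:yaml.org,2002:map
key_node, value_node = node.value[0]    # MappingNode.value is a list of pairs
print(key_node.tag, key_node.value)     # tag:yaml.org,2002:str jobs
print(value_node.id)                    # 'sequence'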
diff --git a/python.d/python_modules/pyyaml2/parser.py b/python.d/python_modules/pyyaml2/parser.py
deleted file mode 100644
index f9e3057f..00000000
--- a/python.d/python_modules/pyyaml2/parser.py
+++ /dev/null
@@ -1,589 +0,0 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser(object):
- # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
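
The state methods deleted above form the machine that turns scanner tokens
into parser events. As a minimal sketch of the event stream they emit,
assuming a system-installed PyYAML (whose public yaml.parse() drives this
same Parser class):

    import yaml

    # One event per state transition, in document order.
    for event in yaml.parse(u"key: [1, 2]"):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent ('key'), SequenceStartEvent, ScalarEvent, ScalarEvent,
    # SequenceEndEvent, MappingEndEvent, DocumentEndEvent, StreamEndEvent
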
diff --git a/python.d/python_modules/pyyaml2/reader.py b/python.d/python_modules/pyyaml2/reader.py
deleted file mode 100644
index 3249e6b9..00000000
--- a/python.d/python_modules/pyyaml2/reader.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(index=0) - return the character at offset `index` ahead.
-# reader.prefix(length=1) - return the next `length` characters without moving.
-# reader.forward(length=1) - move the current position `length` characters forward.
-# reader.index - the index of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, str):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to unicode,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object,
- # - a `unicode` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, unicode):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, str):
- self.name = "<string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = ''
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and len(self.raw_buffer) < 2:
- self.update_raw()
- if not isinstance(self.raw_buffer, unicode):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError, exc:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=1024):
- data = self.stream.read(size)
- if data:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- else:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
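
The peek/prefix/forward interface documented in the header above can be
exercised directly. A minimal sketch, assuming an installed PyYAML that
ships the equivalent yaml.reader module:

    from yaml.reader import Reader

    r = Reader(u"key: value\n")
    print(r.peek())      # 'k'  - the current character, nothing consumed
    print(r.prefix(4))   # 'key:' - lookahead without moving the pointer
    r.forward(5)         # consume 'key: '
    print(r.index, r.line, r.column)  # 5 0 5
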
diff --git a/python.d/python_modules/pyyaml2/representer.py b/python.d/python_modules/pyyaml2/representer.py
deleted file mode 100644
index 5f4fc70d..00000000
--- a/python.d/python_modules/pyyaml2/representer.py
+++ /dev/null
@@ -1,484 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import sys, copy_reg, types
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if type(data) is types.InstanceType:
- data_types = self.get_classobj_bases(data.__class__)+list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, unicode(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
- add_representer = classmethod(add_representer)
-
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
- add_multi_representer = classmethod(add_multi_representer)
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = mapping.items()
- mapping.sort()
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, unicode, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed by
- # calling cls(**args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never be
- # called and the class instance is created by instantiating a trivial
- # class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary, we
- # produce a !!python/object node. Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:'+class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
- Representer.represent_str)
-
-Representer.add_representer(unicode,
- Representer.represent_unicode)
-
-Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
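
The add_representer hook registered over and over above is the same
extension point PyYAML exposes at module level. A minimal sketch, assuming
an installed PyYAML; the Point class and the '!point' tag are made up for
illustration:

    import yaml

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, data):
        # Delegate to represent_mapping(), exactly as the built-ins do.
        return dumper.represent_mapping(u'!point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)
    print(yaml.dump(Point(1, 2)))  # a '!point' mapping of x and y
                                   # (flow vs. block layout varies by version)
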
diff --git a/python.d/python_modules/pyyaml2/resolver.py b/python.d/python_modules/pyyaml2/resolver.py
deleted file mode 100644
index 6b5ab875..00000000
--- a/python.d/python_modules/pyyaml2/resolver.py
+++ /dev/null
@@ -1,224 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver(object):
-
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
- add_implicit_resolver = classmethod(add_implicit_resolver)
-
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key whose content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, basestring) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (basestring, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
- add_path_resolver = classmethod(add_path_resolver)
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, basestring):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, basestring):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(ur'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(ur'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(ur'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(ur'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(ur'^(?:!|&|\*)$'),
- list(u'!&*'))
-
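
add_implicit_resolver, used above to give plain scalars their !!bool,
!!int and !!float tags, is likewise public API. A minimal sketch with a
made-up !version tag, assuming an installed PyYAML:

    import re
    import yaml

    # Plain scalars shaped like x.y.z resolve to the hypothetical !version
    # tag; the last argument lists the characters a match may start with.
    yaml.add_implicit_resolver(u'!version',
                               re.compile(r'^\d+\.\d+\.\d+$'),
                               list(u'0123456789'))
    yaml.add_constructor(u'!version',
        lambda loader, node: tuple(int(p) for p in node.value.split('.')))

    print(yaml.load(u"release: 1.5.0", Loader=yaml.Loader))
    # {'release': (1, 5, 0)}
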
diff --git a/python.d/python_modules/pyyaml2/scanner.py b/python.d/python_modules/pyyaml2/scanner.py
deleted file mode 100644
index 5228fad6..00000000
--- a/python.d/python_modules/pyyaml2/scanner.py
+++ /dev/null
@@ -1,1457 +0,0 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from error import MarkedYAMLError
-from tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey(object):
- # See below simple keys treatment.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % ch.encode('utf-8'), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in self.possible_simple_keys.keys():
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line; in that position simple keys are always allowed, hence the assert
- # below.
- assert self.allow_simple_key or not required
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid intendation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there could not be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Is a simple key pending on the current flow level?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be part of a complex key.
- else:
-
- # Block context needs additional checks.
- # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except for
- # the '-' character) because we want the flow context to be
- # space-independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
- and (ch == u'-' or (not self.flow_level and ch in u'?:')))
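-
- # Illustrative cases (not part of the original module) for the rules
- # above, in the block context:
- #   'foo'    -> True   (ordinary first character)
- #   '-foo'   -> True   ('-' followed by a non-space)
- #   '- foo'  -> False  ('-' plus a space is a block entry instead)
- #   ':foo'   -> True in the block context only; in the flow context
- #               ':' stays an indicator so '{a: b}' keeps its meaning
- #   '#foo'   -> False  ('#' always starts a comment)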
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it is the first character in the
- # stream. We do not yet support a BOM inside the stream, as the
- # specification requires; any such mark is treated as part of the
- # document.
- #
- # TODO: We need to make the tab handling rules saner. A good rule is:
- # tabs cannot precede the tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY.
- # So the checking code would be
- # if <TAB>:
- # self.allow_simple_keys = False
- # We would also need to add a check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END, and the scanners for block,
- # flow, and plain scalars would need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict the characters allowed in anchors
- # and aliases. This may lead to problems; for instance, the document:
- # [ *alias, value ]
- # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return chomping, increment
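-
- # Illustrative examples (not part of the original module) of block
- # scalar headers as parsed by scan_block_scalar_indicators:
- #   '|'    -> chomping=None,  increment=None  (clip; auto-detect indent)
- #   '|-'   -> chomping=False, increment=None  (strip the final break)
- #   '|+'   -> chomping=True,  increment=None  (keep trailing breaks)
- #   '>2'   -> chomping=None,  increment=2     (folded; explicit indent)
- # '|+2' and '|2+' are both accepted: the branches above handle the
- # chomping and indentation indicators in either order.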
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
- # Note that we loosen the indentation rules for quoted scalars. Quoted
- # scalars don't need to adhere to indentation rules because " and '
- # clearly mark their beginning and end. Therefore we are less
- # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
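-
- # A hedged sketch (not part of the original module) of how these tables
- # are applied while scanning a double-quoted scalar:
- #   "a\tb"    -> 'a' + ESCAPE_REPLACEMENTS[u't'] + 'b' == u'a\x09b'
- #   "\x41"    -> unichr(int('41', 16))                 == u'A'
- #   "\u263A"  -> unichr(int('263A', 16))               == u'\u263a'
- # ESCAPE_CODES gives the number of hexadecimal digits that must follow
- # the escape character ('x' -> 2, 'u' -> 4, 'U' -> 8).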
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k).encode('utf-8')), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(unichr(code))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
- # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in u',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':'
- and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
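-
- # Worked example (not part of the original module): in the flow context
- #   {a: b, c}
- # the inner loop stops plain scalars at ',', ':', '?', '[', ']', '{'
- # and '}', so 'a', 'b' and 'c' each become separate plain ScalarTokens,
- # while in the block context 'a: b, c' after a key would scan as the
- # single plain scalar 'b, c'.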
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
- # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
- (self.peek(k).encode('utf-8')), self.get_mark())
- bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- value = unicode(''.join(bytes), 'utf-8')
- except UnicodeDecodeError, exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029' : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
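-
- # Worked example (not part of the original module): for the input
- # u'a\r\nb', scan_line_break() consumes both characters of '\r\n' and
- # returns u'\n'; u'\u2028' is returned unchanged, which is why folding
- # (see scan_block_scalar above) only ever folds plain '\n' breaks.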
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
diff --git a/python.d/python_modules/pyyaml2/serializer.py b/python.d/python_modules/pyyaml2/serializer.py
deleted file mode 100644
index 0bf1e96d..00000000
--- a/python.d/python_modules/pyyaml2/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer(object):
-
- ANCHOR_TEMPLATE = u'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
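-
- # A minimal usage sketch (not part of the original module), assuming a
- # class that mixes Serializer with an emitter and a resolver, as the
- # Dumper classes do:
- #   dumper.open()              # emits STREAM-START
- #   dumper.serialize(node)     # one document per call
- #   dumper.close()             # emits STREAM-END
- # Calling serialize() before open() or after close() raises
- # SerializerError, as enforced above.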
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
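-
- # Illustrative sketch (not part of the original module): anchor_node
- # marks a node None on first sight and assigns an anchor only when the
- # same node object is reached again, so for
- #   shared = [1, 2]; yaml.dump([shared, shared])
- # the second visit triggers generate_anchor and the output uses
- # '&id001' / '*id001' for the repeated list.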
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
diff --git a/python.d/python_modules/pyyaml2/tokens.py b/python.d/python_modules/pyyaml2/tokens.py
deleted file mode 100644
index 4d0b48a3..00000000
--- a/python.d/python_modules/pyyaml2/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
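-
- # Example repr (not part of the original module): the marks are skipped
- # and the remaining attributes are sorted, so a scanned scalar prints as
- #   ScalarToken(plain=True, style=None, value=u'foo')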
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
diff --git a/python.d/python_modules/pyyaml3/__init__.py b/python.d/python_modules/pyyaml3/__init__.py
deleted file mode 100644
index a5e20f94..00000000
--- a/python.d/python_modules/pyyaml3/__init__.py
+++ /dev/null
@@ -1,312 +0,0 @@
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.11'
-try:
- from .cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
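-
- # A hedged usage sketch (not part of the original module):
- #   >>> import yaml
- #   >>> yaml.safe_load("a: 1")
- #   {'a': 1}
- #   >>> list(yaml.safe_load_all("--- 1\n--- 2\n"))
- #   [1, 2]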
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- stream = io.StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
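-
- # A hedged usage sketch (not part of the original module); the '!env'
- # tag and the pattern are made up for illustration:
- #   import re, yaml
- #   pattern = re.compile(r'^\$\{\w+\}$')
- #   yaml.add_implicit_resolver('!env', pattern, ['$'])
- #   yaml.add_constructor('!env',
- #       lambda loader, node: loader.construct_scalar(node))
- #   yaml.load('path: ${HOME}')   # -> {'path': '${HOME}'}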
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a multi-representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(metaclass=YAMLObjectMetaclass):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- @classmethod
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
-
- @classmethod
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
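-
- # A hedged subclass sketch (not part of the original module); the tag
- # and fields are made up for illustration:
- #   class Monster(yaml.YAMLObject):
- #       yaml_tag = '!Monster'
- #       def __init__(self, name, hp):
- #           self.name = name
- #           self.hp = hp
- #   yaml.load("!Monster {name: Dragon, hp: 40}")
- #   # -> a Monster instance with name='Dragon', hp=40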
-
diff --git a/python.d/python_modules/pyyaml3/composer.py b/python.d/python_modules/pyyaml3/composer.py
deleted file mode 100644
index d5c6a7ac..00000000
--- a/python.d/python_modules/pyyaml3/composer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer:
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor, event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
- raise ComposerError("found duplicate anchor %r; first occurence"
- % anchor, self.anchors[anchor].start_mark,
- "second occurence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == '!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
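-
- # Worked example (not part of the original module): composing
- #   a: &x [1, 2]
- #   b: *x
- # stores the sequence node in self.anchors['x'] at the '&x' event, and
- # the '*x' AliasEvent returns that same node object, so 'a' and 'b'
- # share one SequenceNode in the representation tree.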
-
diff --git a/python.d/python_modules/pyyaml3/constructor.py b/python.d/python_modules/pyyaml3/constructor.py
deleted file mode 100644
index 981543ae..00000000
--- a/python.d/python_modules/pyyaml3/constructor.py
+++ /dev/null
@@ -1,686 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor:
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = next(generator)
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- if not isinstance(key, collections.Hashable):
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- @classmethod
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
-
- @classmethod
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == 'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return super().construct_scalar(node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == 'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == 'tag:yaml.org,2002:value':
- key_node.tag = 'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
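-
- # Worked example (not part of the original module): for
- #   base: &base {x: 1}
- #   child: {<<: *base, y: 2}
- # flatten_mapping removes the '<<' entry from 'child' and prepends the
- # pairs of *base, so construct_mapping yields {'x': 1, 'y': 2}; keys
- # given explicitly in 'child' win because later pairs overwrite earlier
- # ones in the mapping dict.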
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return super().construct_mapping(node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- 'yes': True,
- 'no': False,
- 'true': True,
- 'false': False,
- 'on': True,
- 'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
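-
- # Worked example (not part of the original module) for the sexagesimal
- # branch above: '1:30:00' splits into [1, 30, 0], which is summed in
- # reverse with base-60 place values:
- #   0*1 + 30*60 + 1*3600 = 5400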
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- r'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
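-
- # Illustrative inputs (not part of the original module) accepted by the
- # regexp above:
- #   2017-01-24                   -> datetime.date(2017, 1, 24)
- #   2017-01-24 15:21:16          -> datetime.datetime(...)
- #   2017-01-24T15:21:16.5+02:00  -> offset-normalized datetime
- # The fraction is padded or truncated to microseconds, and a '+02:00'
- # offset is subtracted, so zoned timestamps are returned in UTC.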
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
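- # The `yield omap` above is the two-step construction protocol used by
- # BaseConstructor: the still-empty container is handed out first, so aliases
- # can point at it before it is populated, and the generator is then resumed
- # to fill it in. The visible result is a list of pairs:
- #
- #     >>> yaml.safe_load('!!omap [one: 1, two: 2]')
- #     [('one', 1), ('two', 2)]
-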
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- return self.construct_scalar(node)
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag,
- node.start_mark)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
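- # With the registrations above, safe_load() covers the standard
- # tag:yaml.org,2002 namespace; any other tag falls through to
- # construct_undefined and is rejected. A sketch:
- #
- #     >>> yaml.safe_load('!!set {a, b}') == {'a', 'b'}
- #     True
- #     >>> yaml.safe_load('!!python/tuple [1, 2]')
- #     ...  # raises ConstructorError: could not determine a constructor ...
-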
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node)
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_bytes(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- def construct_python_long(self, node):
- return self.construct_yaml_int(node)
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name, exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if '.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = 'builtins'
- object_name = name
- try:
- __import__(module_name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name, exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r"
- % (object_name, module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
- setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created; see make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
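- # A sketch of the apply/new formats documented above; this is also why the
- # full Loader must only be used on trusted input, since arbitrary callables
- # can be invoked:
- #
- #     >>> yaml.load('!!python/object/apply:complex [1, 2]', Loader=yaml.Loader)
- #     (1+2j)
- #     >>> yaml.load('!!python/object/apply:collections.Counter {args: [aab]}',
- #     ...           Loader=yaml.Loader)
- #     Counter({'a': 2, 'b': 1})
-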
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/python.d/python_modules/pyyaml3/cyaml.py
deleted file mode 100644
index d5cb87e9..00000000
--- a/python.d/python_modules/pyyaml3/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
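- # These classes exist only when the LibYAML extension module `_yaml` was
- # built, so callers usually guard the import and fall back to the
- # pure-Python loaders (a common sketch):
- #
- #     try:
- #         from yaml import CSafeLoader as SafeLoader
- #     except ImportError:
- #         from yaml import SafeLoader
- #
- #     data = yaml.load(stream, Loader=SafeLoader)
-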
diff --git a/python.d/python_modules/pyyaml3/dumper.py b/python.d/python_modules/pyyaml3/dumper.py
deleted file mode 100644
index 0b691287..00000000
--- a/python.d/python_modules/pyyaml3/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
diff --git a/python.d/python_modules/pyyaml3/emitter.py b/python.d/python_modules/pyyaml3/emitter.py
deleted file mode 100644
index 34cb145a..00000000
--- a/python.d/python_modules/pyyaml3/emitter.py
+++ /dev/null
@@ -1,1137 +0,0 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
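- # For example, the document `a: 1` corresponds (roughly) to this event
- # sequence, which yaml.parse() produces and yaml.emit() consumes:
- #
- #     StreamStartEvent, DocumentStartEvent, MappingStartEvent,
- #     ScalarEvent('a'), ScalarEvent('1'),
- #     MappingEndEvent, DocumentEndEvent, StreamEndEvent
- #
- #     >>> yaml.emit(yaml.parse('a: 1'))   # event-level round trip
- #     'a: 1\n'
-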
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis:
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter:
-
- DEFAULT_TAG_PREFIXES = {
- '!' : '!',
- 'tag:yaml.org,2002:' : '!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
- # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = '\n'
- if line_break in ['\r', '\n', '\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not hasattr(self.stream, 'encoding'):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = sorted(self.event.tags.keys())
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator('---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator('...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor('&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor('*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator('[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator('{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator('-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == '')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = '!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return '%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != '!' or handle[-1] != '!':
- raise EmitterError("tag handle must start and end with '!': %r" % handle)
- for ch in handle[1:-1]:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch, handle))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == '!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ch)
- if start < end:
- chunks.append(prefix[start:end])
- return ''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == '!':
- return tag
- handle = None
- suffix = tag
- prefixes = sorted(self.tag_prefixes.keys())
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == '!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.~*\'()[]' \
- or (ch == '!' and handle != '!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ch)
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = ''.join(chunks)
- if handle:
- return '%s%s' % (handle, suffix_text)
- else:
- return '!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch, anchor))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith('---') or scalar.startswith('...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in '#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in '?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar either.
- if ch in ',?[]{}':
- flow_indicators = True
- if ch == ':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '#' and preceded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in '\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
- if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == ' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in '\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write('\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = ' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = ' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = '%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = '%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator('\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != ' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == '\'':
- data = '\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
- self.write_indicator('\'', False)
-
- ESCAPE_REPLACEMENTS = {
- '\0': '0',
- '\x07': 'a',
- '\x08': 'b',
- '\x09': 't',
- '\x0A': 'n',
- '\x0B': 'v',
- '\x0C': 'f',
- '\x0D': 'r',
- '\x1B': 'e',
- '\"': '\"',
- '\\': '\\',
- '\x85': 'N',
- '\xA0': '_',
- '\u2028': 'L',
- '\u2029': 'P',
- }
-
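- # The table above supplies the short escapes used when the emitter falls
- # back to double-quoted style; characters without an entry get a numeric
- # \xXX, \uXXXX or \UXXXXXXXX escape instead. For instance:
- #
- #     >>> yaml.dump('\x07 \u2028')
- #     '"\\a \\L"\n'
-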
- def write_double_quoted(self, text, split=True):
- self.write_indicator('"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
- or not ('\x20' <= ch <= '\x7E'
- or (self.allow_unicode
- and ('\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= '\xFF':
- data = '\\x%02X' % ord(ch)
- elif ch <= '\uFFFF':
- data = '\\u%04X' % ord(ch)
- else:
- data = '\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == ' ':
- data = '\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator('"', False)
-
- def determine_block_hints(self, text):
- hints = ''
- if text:
- if text[0] in ' \n\x85\u2028\u2029':
- hints += str(self.best_indent)
- if text[-1] not in '\n\x85\u2028\u2029':
- hints += '-'
- elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
- hints += '+'
- return hints
-
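- # A sketch of the hints produced (with the default best_indent of 2):
- #
- #     determine_block_hints('text')      -> '-'  strip: no trailing break
- #     determine_block_hints('text\n')    -> ''   clip: one trailing break
- #     determine_block_hints('text\n\n')  -> '+'  keep: extra trailing breaks
- #     determine_block_hints(' text\n')   -> '2'  leading space forces an
- #                                                explicit indent indicator
-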
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('>'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != ' ' \
- and text[start] == '\n':
- self.write_line_break()
- leading_space = (ch == ' ')
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- spaces = (ch == ' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('|'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in '\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = ' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
diff --git a/python.d/python_modules/pyyaml3/error.py b/python.d/python_modules/pyyaml3/error.py
deleted file mode 100644
index b796b4dc..00000000
--- a/python.d/python_modules/pyyaml3/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end]
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
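- # A sketch of how a Mark renders an error location, here pointing at offset
- # 9 of the buffer 'key: [1, 2':
- #
- #     >>> print(Mark('<input>', 9, 0, 9, 'key: [1, 2', 9))
- #      in "<input>", line 1, column 10:
- #         key: [1, 2
- #                  ^
-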
diff --git a/python.d/python_modules/pyyaml3/events.py b/python.d/python_modules/pyyaml3/events.py
deleted file mode 100644
index f79ad389..00000000
--- a/python.d/python_modules/pyyaml3/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
diff --git a/python.d/python_modules/pyyaml3/loader.py b/python.d/python_modules/pyyaml3/loader.py
deleted file mode 100644
index 08c8f01b..00000000
--- a/python.d/python_modules/pyyaml3/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
diff --git a/python.d/python_modules/pyyaml3/nodes.py b/python.d/python_modules/pyyaml3/nodes.py
deleted file mode 100644
index c4f070c4..00000000
--- a/python.d/python_modules/pyyaml3/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
diff --git a/python.d/python_modules/pyyaml3/parser.py b/python.d/python_modules/pyyaml3/parser.py
deleted file mode 100644
index 13a5995d..00000000
--- a/python.d/python_modules/pyyaml3/parser.py
+++ /dev/null
@@ -1,589 +0,0 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
- # indentless_sequence: { BLOCK-ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser:
- # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- '!': '!',
- '!!': 'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == 'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == 'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle,
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle,
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == '!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == '!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == '!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), '',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while the production rules for flow_sequence_entry and
- # flow_mapping_entry are identical, their interpretations differ.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), '', mark, mark)
-
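
The parser removed above is a pull parser: each parse_* method returns one event and stores its continuation in self.state, which is what lets check_event()/peek_event()/get_event() be called incrementally. A sketch of walking the event stream (Parser only works mixed into a loader, so the composed Loader from loader.py is used):

    from pyyaml3.loader import Loader

    loader = Loader("- a\n- {b: 1}\n")
    while loader.check_event():     # pulls at most one event per call
        print(loader.get_event())
    # StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
    # ScalarEvent(a), MappingStartEvent, ScalarEvent(b), ScalarEvent(1),
    # MappingEndEvent, SequenceEndEvent, DocumentEndEvent, StreamEndEvent
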
diff --git a/python.d/python_modules/pyyaml3/reader.py b/python.d/python_modules/pyyaml3/reader.py
deleted file mode 100644
index f70e920f..00000000
--- a/python.d/python_modules/pyyaml3/reader.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - move the current position by `length` characters.
-# reader.index - the number of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from .error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, bytes):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to a unicode string,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `bytes` object,
- # - a `str` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = ''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, str):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+'\0'
- elif isinstance(stream, bytes):
- self.name = "<byte string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = None
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in '\n\x85\u2028\u2029' \
- or (ch == '\r' and self.buffer[self.pointer] != '\n'):
- self.line += 1
- self.column = 0
- elif ch != '\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
- self.update_raw()
- if isinstance(self.raw_buffer, bytes):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError as exc:
- character = self.raw_buffer[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += '\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=4096):
- data = self.stream.read(size)
- if self.raw_buffer is None:
- self.raw_buffer = data
- else:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- if not data:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
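
The Reader contract documented at the top of the deleted file is small: peek() looks ahead without consuming, forward() consumes while tracking index/line/column, prefix() returns a lookahead slice, and a trailing '\0' marks end of input. A quick sketch, again assuming pyyaml3 is importable:

    from pyyaml3.reader import Reader

    r = Reader("ab\ncd")               # str input needs no decoding
    assert r.peek() == 'a' and r.peek(1) == 'b'
    r.forward(3)                       # consume 'a', 'b' and the newline
    print(r.line, r.column)            # -> 1 0  (newline advanced the line)
    print(r.prefix(2))                 # -> cd
    print(repr(r.peek(2)))             # -> '\x00', the appended terminator
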
diff --git a/python.d/python_modules/pyyaml3/representer.py b/python.d/python_modules/pyyaml3/representer.py
deleted file mode 100644
index 67cd6fd2..00000000
--- a/python.d/python_modules/pyyaml3/representer.py
+++ /dev/null
@@ -1,374 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter:
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, str(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- @classmethod
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
-
- @classmethod
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = list(mapping.items())
- try:
- mapping = sorted(mapping)
- except TypeError:
- pass
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, bytes, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
- def represent_str(self, data):
- return self.represent_scalar('tag:yaml.org,2002:str', data)
-
- def represent_binary(self, data):
- if hasattr(base64, 'encodebytes'):
- data = base64.encodebytes(data).decode('ascii')
- else:
- data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
- def represent_bool(self, data):
- if data:
- value = 'true'
- else:
- value = 'false'
- return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = '.nan'
- elif data == self.inf_value:
- value = '.inf'
- elif data == -self.inf_value:
- value = '-.inf'
- else:
- value = repr(data).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if '.' not in value and 'e' in value:
- value = value.replace('e', '.0e', 1)
- return self.represent_scalar('tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence('tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping('tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping('tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = data.isoformat()
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = data.isoformat(' ')
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = '%r' % data.real
- elif data.real == 0.0:
- data = '%rj' % data.imag
- elif data.imag > 0:
- data = '%r+%rj' % (data.real, data.imag)
- else:
- data = '%r%rj' % (data.real, data.imag)
- return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = '%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
- def represent_module(self, data):
- return self.represent_scalar(
- 'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
- def represent_object(self, data):
- # We use the __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copyreg.dispatch_table:
- reduce = copyreg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = 'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = 'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = '%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- 'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
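
The copy-on-write add_representer() classmethod above is the extension point for dumping custom types. A sketch that registers a hypothetical Seconds wrapper (the class, tag choice and output are illustrative, not netdata code; safe_dump from the vendored package's __init__ dispatches through SafeRepresenter):

    from pyyaml3 import safe_dump
    from pyyaml3.representer import SafeRepresenter

    class Seconds:                         # hypothetical wrapper type
        def __init__(self, value):
            self.value = value

    def represent_seconds(dumper, data):
        # Emit the wrapper as a plain string scalar such as "30s".
        return dumper.represent_scalar('tag:yaml.org,2002:str',
                                       '%ds' % data.value)

    SafeRepresenter.add_representer(Seconds, represent_seconds)
    print(safe_dump({'timeout': Seconds(30)}))   # -> {timeout: 30s}
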
diff --git a/python.d/python_modules/pyyaml3/resolver.py b/python.d/python_modules/pyyaml3/resolver.py
deleted file mode 100644
index 0eece258..00000000
--- a/python.d/python_modules/pyyaml3/resolver.py
+++ /dev/null
@@ -1,224 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver:
-
- DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- @classmethod
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
- @classmethod
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key whose content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, str) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (str, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, str):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, str):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == '':
- resolvers = self.yaml_implicit_resolvers.get('', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:bool',
- re.compile(r'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:float',
- re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:int',
- re.compile(r'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:merge',
- re.compile(r'^(?:<<)$'),
- ['<'])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:null',
- re.compile(r'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:timestamp',
- re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list('0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:value',
- re.compile(r'^(?:=)$'),
- ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:yaml',
- re.compile(r'^(?:!|&|\*)$'),
- list('!&*'))
-
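
Implicit resolution above is a two-level lookup: the first character of a plain scalar selects a bucket of (tag, regexp) pairs, and the first regexp matching the whole value wins, with tag:yaml.org,2002:str as the fallback. A sketch registering a hypothetical !percent tag; note that registering on the stock Resolver affects every loader and dumper that mixes it in:

    import re
    from pyyaml3.nodes import ScalarNode
    from pyyaml3.resolver import Resolver

    Resolver.add_implicit_resolver(
        '!percent',
        re.compile(r'^\d{1,3}%$'),
        list('0123456789'))            # first characters that can match

    r = Resolver()
    # implicit=(True, False) is what the parser passes for plain scalars.
    print(r.resolve(ScalarNode, '85%', (True, False)))   # -> !percent
    print(r.resolve(ScalarNode, '85',  (True, False)))   # -> tag:yaml.org,2002:int
    print(r.resolve(ScalarNode, 'abc', (True, False)))   # -> tag:yaml.org,2002:str
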
diff --git a/python.d/python_modules/pyyaml3/scanner.py b/python.d/python_modules/pyyaml3/scanner.py
deleted file mode 100644
index 494d975b..00000000
--- a/python.d/python_modules/pyyaml3/scanner.py
+++ /dev/null
@@ -1,1448 +0,0 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey:
- # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner:
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == '\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == '%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == '-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == '.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == '\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == '[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == '{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == ']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == '}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == ',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == '-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == '?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == ':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == '*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == '&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == '!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == '|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == '>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == '\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == '\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token" % ch,
- self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in list(self.possible_simple_keys):
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore a required key is always allowed.
- assert self.allow_simple_key or not required
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid intendation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there could not be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be part of a complex key.
- else:
-
- # Block context needs additional checks.
- # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '---' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '...' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
- and (ch == '-' or (not self.flow_level and ch in '?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered part of
- # the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == '\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == 'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == 'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r" % self.peek(),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not ('0' <= ch <= '9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch, self.get_mark())
- length = 0
- while '0' <= self.peek(length) <= '9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == ' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != ' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch, self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
- # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == '*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == '<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != '>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- elif ch in '\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = '!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in '\0 \r\n\x85\u2028\u2029':
- if ch == '!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = '!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = '!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = ''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != '\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in ' \t'
- length = 0
- while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != '\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == '\n' \
- and leading_non_space and self.peek() not in ' \t':
- if not breaks:
- chunks.append(' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == '\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch, self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r" % ch,
- self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() != ' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- while self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
- # Note that we loosen indentation rules for quoted scalars. Quoted
- # scalars don't need to adhere to indentation because " and ' clearly
- # mark their beginning and end. Therefore we are less
- # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- '0': '\0',
- 'a': '\x07',
- 'b': '\x08',
- 't': '\x09',
- '\t': '\x09',
- 'n': '\x0A',
- 'v': '\x0B',
- 'f': '\x0C',
- 'r': '\x0D',
- 'e': '\x1B',
- ' ': '\x20',
- '\"': '\"',
- '\\': '\\',
- 'N': '\x85',
- '_': '\xA0',
- 'L': '\u2028',
- 'P': '\u2029',
- }
-
- ESCAPE_CODES = {
- 'x': 2,
- 'u': 4,
- 'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == '\'' and self.peek(1) == '\'':
- chunks.append('\'')
- self.forward(2)
- elif (double and ch == '\'') or (not double and ch in '\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == '\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k)), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(chr(code))
- self.forward(length)
- elif ch in '\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch, self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in ' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == '\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in ' \t':
- self.forward()
- if self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
- # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == '#':
- break
- while True:
- ch = self.peek(length)
- if ch in '\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == ':' and
- self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in ',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == ':'
- and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == '#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in ' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
- # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != '!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != ' ':
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if ch != '!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.!~*\'()[]%':
- if ch == '%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch, self.get_mark())
- return ''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- codes = []
- mark = self.get_mark()
- while self.peek() == '%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
- % self.peek(k), self.get_mark())
- codes.append(int(self.prefix(2), 16))
- self.forward(2)
- try:
- value = bytes(codes).decode('utf-8')
- except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029' : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in '\r\n\x85':
- if self.prefix(2) == '\r\n':
- self.forward(2)
- else:
- self.forward()
- return '\n'
- elif ch in '\u2028\u2029':
- self.forward()
- return ch
- return ''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
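The scanner removed above is the engine behind PyYAML's token pipeline: check_* methods peek at the next indicator, fetch_* methods push tokens, and scan_* methods consume the raw characters. With the bundled copy gone, the same token stream remains observable through the top-level API; a minimal sketch, assuming the system PyYAML package now stands in for python_modules/pyyaml3:

    import yaml  # system package replacing the bundled copy

    for token in yaml.scan("key: [a, b]"):
        # StreamStartToken, BlockMappingStartToken, KeyToken,
        # ScalarToken(plain=True, ...), ValueToken, FlowSequenceStartToken, ...
        print(token)
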
diff --git a/python.d/python_modules/pyyaml3/serializer.py b/python.d/python_modules/pyyaml3/serializer.py
deleted file mode 100644
index fe911e67..00000000
--- a/python.d/python_modules/pyyaml3/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer:
-
- ANCHOR_TEMPLATE = 'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
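The deleted Serializer makes two passes: anchor_node marks every node reached twice so generate_anchor can hand it an 'id%03d' name, then serialize_node emits events depth-first, replacing already-serialized nodes with an AliasEvent. A small sketch of the public entry point, again assuming the system PyYAML install:

    import yaml

    node = yaml.ScalarNode(tag='tag:yaml.org,2002:str', value='hello')
    print(yaml.serialize(node))  # emits one implicit document: 'hello'
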
diff --git a/python.d/python_modules/pyyaml3/tokens.py b/python.d/python_modules/pyyaml3/tokens.py
deleted file mode 100644
index 4d0b48a3..00000000
--- a/python.d/python_modules/pyyaml3/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
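These token classes are plain value objects; the base __repr__ prints every attribute except the marks, which keeps debug output from the scanner readable. A quick illustration via the system PyYAML package:

    import yaml

    tok = yaml.ScalarToken(value='x', plain=True, start_mark=None, end_mark=None)
    print(tok)  # ScalarToken(plain=True, style=None, value='x')
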
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
index 218401e1..61f4f6d6 100644
--- a/python.d/redis.chart.py
+++ b/python.d/redis.chart.py
@@ -19,40 +19,58 @@ retries = 60
# 'unix_socket': None
# }}
-ORDER = ['operations', 'hit_rate', 'memory', 'keys', 'clients', 'slaves']
+ORDER = ['operations', 'hit_rate', 'memory', 'keys', 'net', 'connections', 'clients', 'slaves', 'persistence']
CHARTS = {
'operations': {
- 'options': [None, 'Operations', 'operations/s', 'Statistics', 'redis.operations', 'line'],
+ 'options': [None, 'Redis Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
'lines': [
+ ['total_commands_processed', 'commands', 'incremental'],
['instantaneous_ops_per_sec', 'operations', 'absolute']
]},
'hit_rate': {
- 'options': [None, 'Hit rate', 'percent', 'Statistics', 'redis.hit_rate', 'line'],
+ 'options': [None, 'Redis Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
'lines': [
['hit_rate', 'rate', 'absolute']
]},
'memory': {
- 'options': [None, 'Memory utilization', 'kilobytes', 'Memory', 'redis.memory', 'line'],
+ 'options': [None, 'Redis Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
'lines': [
['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024]
]},
+ 'net': {
+ 'options': [None, 'Redis Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
+ 'lines': [
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
+ ]},
'keys': {
- 'options': [None, 'Database keys', 'keys', 'Keys', 'redis.keys', 'line'],
+ 'options': [None, 'Redis Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
'lines': [
# lines are created dynamically in `check()` method
]},
+ 'connections': {
+ 'options': [None, 'Redis Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
+ 'lines': [
+ ['total_connections_received', 'received', 'incremental', 1],
+ ['rejected_connections', 'rejected', 'incremental', -1]
+ ]},
'clients': {
- 'options': [None, 'Clients', 'clients', 'Clients', 'redis.clients', 'line'],
+ 'options': [None, 'Redis Clients', 'clients', 'connections', 'redis.clients', 'line'],
'lines': [
- ['connected_clients', 'connected', 'absolute'],
- ['blocked_clients', 'blocked', 'absolute']
+ ['connected_clients', 'connected', 'absolute', 1],
+ ['blocked_clients', 'blocked', 'absolute', -1]
]},
'slaves': {
- 'options': [None, 'Slaves', 'slaves', 'Replication', 'redis.slaves', 'line'],
+ 'options': [None, 'Redis Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
'lines': [
['connected_slaves', 'connected', 'absolute']
+ ]},
+ 'persistence': {
+ 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence', 'redis.rdb_changes', 'line'],
+ 'lines': [
+ ['rdb_changes_since_last_save', 'changes', 'absolute']
]}
}
@@ -61,46 +79,73 @@ class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
self.request = "INFO\r\n"
- self.host = "localhost"
- self.port = 6379
- self.unix_socket = None
self.order = ORDER
self.definitions = CHARTS
self._keep_alive = True
self.chart_name = ""
+ self.passwd = None
+ self.port = 6379
+ if 'port' in configuration:
+ self.port = configuration['port']
+ if 'pass' in configuration:
+ self.passwd = configuration['pass']
+ if 'host' in configuration:
+ self.host = configuration['host']
+ if 'socket' in configuration:
+ self.unix_socket = configuration['socket']
def _get_data(self):
"""
Get data from socket
:return: dict
"""
+ if self.passwd:
+ info_request = self.request
+ self.request = "AUTH " + self.passwd + "\r\n"
+ raw = self._get_raw_data().strip()
+ if raw != "+OK":
+ self.error("invalid password")
+ return None
+ self.request = info_request
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
try:
- raw = self._get_raw_data().split("\n")
+ parsed = response.split("\n")
except AttributeError:
- self.error("no data received")
+ self.error("response is invalid/empty")
return None
+
data = {}
- for line in raw:
- if line.startswith(('instantaneous', 'keyspace', 'used_memory', 'connected', 'blocked')):
- try:
- t = line.split(':')
- data[t[0]] = int(t[1])
- except (IndexError, ValueError):
- pass
- elif line.startswith('db'):
+ for line in parsed:
+ if len(line) < 5 or line[0] == '$' or line[0] == '#':
+ continue
+
+ if line.startswith('db'):
tmp = line.split(',')[0].replace('keys=', '')
record = tmp.split(':')
- data[record[0]] = int(record[1])
+ data[record[0]] = record[1]
+ continue
+
+ try:
+ t = line.split(':')
+ data[t[0]] = t[1]
+ except (IndexError, ValueError):
+ self.debug("invalid line received: " + str(line))
+ pass
+
+ if len(data) == 0:
+ self.error("received data doesn't have any records")
+ return None
+
try:
- data['hit_rate'] = int((data['keyspace_hits'] / float(data['keyspace_hits'] + data['keyspace_misses'])) * 100)
+ data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
except:
data['hit_rate'] = 0
- if len(data) == 0:
- self.error("received data doesn't have needed records")
- return None
- else:
- return data
+ return data
def _check_raw_data(self, data):
"""
@@ -112,12 +157,15 @@ class Service(SocketService):
length = len(data)
supposed = data.split('\n')[0][1:]
offset = len(supposed) + 4 # 1 dollar sign, 1 newline character + 1 ending sequence '\r\n'
+ if not supposed.isdigit():
+ return True
supposed = int(supposed)
+
if length - offset >= supposed:
+ self.debug("received full response from redis")
return True
- else:
- return False
+ self.debug("waiting more data from redis")
return False
def check(self):
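With a 'pass' option configured, the module now performs one extra AUTH round-trip before INFO and insists on a '+OK' reply. A minimal sketch of that exchange over a raw socket, using a hypothetical password:

    import socket

    s = socket.create_connection(('localhost', 6379))
    s.sendall(b"AUTH s3cret\r\n")
    assert s.recv(64).strip() == b"+OK"  # anything else -> "invalid password"
    s.sendall(b"INFO\r\n")
    print(s.recv(65536).decode())        # bulk reply: '$<length>\r\n' + stats
    s.close()
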
diff --git a/python.d/sensors.chart.py b/python.d/sensors.chart.py
index 23bccb97..e83aacfd 100644
--- a/python.d/sensors.chart.py
+++ b/python.d/sensors.chart.py
@@ -77,6 +77,9 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = []
self.definitions = {}
+ self.celsius = ('Celsius', lambda x: x)
+ self.fahrenheit = ('Fahrenheit', lambda x: x * 9 / 5 + 32) if self.configuration.get('fahrenheit') else False
+ self.choice = (choice for choice in [self.fahrenheit, self.celsius] if choice)
self.chips = []
def _get_data(self):
@@ -94,7 +97,10 @@ class Service(SimpleService):
limit = LIMITS[typeName];
if val < limit[0] or val > limit[1]:
continue
- data[prefix + "_" + str(feature.name.decode())] = int(val * 1000)
+ if 'temp' in str(feature.name.decode()):
+ data[prefix + "_" + str(feature.name.decode())] = int(self.calc(val) * 1000)
+ else:
+ data[prefix + "_" + str(feature.name.decode())] = int(val * 1000)
except Exception as e:
self.error(e)
return None
@@ -121,6 +127,8 @@ class Service(SimpleService):
self.order.append(name)
chart_def = list(CHARTS[type]['options'])
chart_def[1] = chip_name + chart_def[1]
+ if chart_def[2] == 'Celsius':
+ chart_def[2] = self.choice[0]
self.definitions[name] = {'options': chart_def}
self.definitions[name]['lines'] = []
line = list(CHARTS[type]['lines'][0])
@@ -134,10 +142,20 @@ class Service(SimpleService):
except Exception as e:
self.error(e)
return False
+
+ try:
+ self.choice = next(self.choice)
+ except StopIteration:
+ # That cannot happen, but..
+ self.choice = ('Celsius', lambda x: x)
+ self.calc = self.choice[1]
+ else:
+ self.calc = self.choice[1]
try:
self._create_definitions()
except Exception as e:
self.error(e)
return False
+
return True
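The sensors change above converts only temperature features when 'fahrenheit' is enabled; other sensor types keep their raw values. The conversion is the usual affine map, easy to sanity-check in isolation:

    fahrenheit = lambda celsius: celsius * 9 / 5 + 32
    print(fahrenheit(0), fahrenheit(100))  # 32.0 212.0 on Python 3
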
diff --git a/python.d/squid.chart.py b/python.d/squid.chart.py
index 8300d9ba..e9e8f1d0 100644
--- a/python.d/squid.chart.py
+++ b/python.d/squid.chart.py
@@ -58,20 +58,25 @@ class Service(SocketService):
Get data via http request
:return: dict
"""
+ response = self._get_raw_data()
+
data = {}
try:
raw = ""
- for tmp in self._get_raw_data().split('\r\n'):
+ for tmp in response.split('\r\n'):
if tmp.startswith("sample_time"):
raw = tmp
break
+
if raw.startswith('<'):
self.error("invalid data received")
return None
+
for row in raw.split('\n'):
if row.startswith(("client", "server.all")):
tmp = row.split("=")
data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
+
except (ValueError, AttributeError, TypeError):
self.error("invalid data received")
return None
@@ -83,15 +88,19 @@ class Service(SocketService):
return data
def _check_raw_data(self, data):
- if "Connection: keep-alive" in data[:1024]:
+ header = data[:1024].lower()
+
+ if "connection: keep-alive" in header:
self._keep_alive = True
else:
self._keep_alive = False
- if data[-7:] == "\r\n0\r\n\r\n" and "Transfer-Encoding: chunked" in data[:1024]: # HTTP/1.1 response
+ if data[-7:] == "\r\n0\r\n\r\n" and "transfer-encoding: chunked" in header: # HTTP/1.1 response
+ self.debug("received full response from squid")
return True
- else:
- return False
+
+ self.debug("waiting more data from squid")
+ return False
def check(self):
"""
diff --git a/python.d/varnish.chart.py b/python.d/varnish.chart.py
new file mode 100644
index 00000000..2b1512f4
--- /dev/null
+++ b/python.d/varnish.chart.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from re import compile
+from os import access as is_executable, X_OK
+from subprocess import Popen, PIPE
+
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = ['session', 'hit_rate', 'chit_rate', 'expunge', 'threads', 'backend_health', 'memory_usage', 'bad', 'uptime']
+
+CHARTS = {'backend_health':
+ {'lines': [['backend_conn', 'conn', 'incremental', 1, 1],
+ ['backend_unhealthy', 'unhealthy', 'incremental', 1, 1],
+ ['backend_busy', 'busy', 'incremental', 1, 1],
+ ['backend_fail', 'fail', 'incremental', 1, 1],
+ ['backend_reuse', 'reuse', 'incremental', 1, 1],
+ ['backend_recycle', 'recycle', 'incremental', 1, 1],
+ ['backend_toolate', 'toolate', 'incremental', 1, 1],
+ ['backend_retry', 'retry', 'incremental', 1, 1],
+ ['backend_req', 'req', 'incremental', 1, 1]],
+ 'options': [None, 'Backend health', 'connections', 'Backend health', 'varnish.backend_traf', 'line']},
+ 'bad':
+ {'lines': [['sess_drop_b', None, 'incremental', 1, 1],
+ ['backend_unhealthy_b', None, 'incremental', 1, 1],
+ ['fetch_failed', None, 'incremental', 1, 1],
+ ['backend_busy_b', None, 'incremental', 1, 1],
+ ['threads_failed_b', None, 'incremental', 1, 1],
+ ['threads_limited_b', None, 'incremental', 1, 1],
+ ['threads_destroyed_b', None, 'incremental', 1, 1],
+ ['thread_queue_len_b', 'queue_len', 'absolute', 1, 1],
+ ['losthdr_b', None, 'incremental', 1, 1],
+ ['esi_errors_b', None, 'incremental', 1, 1],
+ ['esi_warnings_b', None, 'incremental', 1, 1],
+ ['sess_fail_b', None, 'incremental', 1, 1],
+ ['sc_pipe_overflow_b', None, 'incremental', 1, 1],
+ ['sess_pipe_overflow_b', None, 'incremental', 1, 1]],
+ 'options': [None, 'Misbehavior', 'problems', 'Problems summary', 'varnish.bad', 'line']},
+ 'expunge':
+ {'lines': [['n_expired', 'expired', 'incremental', 1, 1],
+ ['n_lru_nuked', 'lru_nuked', 'incremental', 1, 1]],
+ 'options': [None, 'Object expunging', 'objects', 'Cache performance', 'varnish.expunge', 'line']},
+ 'hit_rate':
+ {'lines': [['cache_hit_perc', 'hit', 'absolute', 1, 100],
+ ['cache_miss_perc', 'miss', 'absolute', 1, 100],
+ ['cache_hitpass_perc', 'hitpass', 'absolute', 1, 100]],
+ 'options': [None, 'All history hit rate ratio', 'percent', 'Cache performance', 'varnish.hit_rate', 'stacked']},
+ 'chit_rate':
+ {'lines': [['cache_hit_cperc', 'hit', 'absolute', 1, 100],
+ ['cache_miss_cperc', 'miss', 'absolute', 1, 100],
+ ['cache_hitpass_cperc', 'hitpass', 'absolute', 1, 100]],
+ 'options': [None, 'Current poll hit rate ratio', 'percent', 'Cache performance', 'varnish.chit_rate', 'stacked']},
+ 'memory_usage':
+ {'lines': [['s0.g_space', 'available', 'absolute', 1, 1048576],
+ ['s0.g_bytes', 'allocated', 'absolute', -1, 1048576]],
+ 'options': [None, 'Memory usage', 'megabytes', 'Memory usage', 'varnish.memory_usage', 'stacked']},
+ 'session':
+ {'lines': [['sess_conn', 'sess_conn', 'incremental', 1, 1],
+ ['client_req', 'client_requests', 'incremental', 1, 1],
+ ['client_conn', 'client_conn', 'incremental', 1, 1],
+ ['client_drop', 'client_drop', 'incremental', 1, 1],
+ ['sess_dropped', 'sess_dropped', 'incremental', 1, 1]],
+ 'options': [None, 'Sessions', 'units', 'Client metrics', 'varnish.session', 'line']},
+ 'threads':
+ {'lines': [['threads', None, 'absolute', 1, 1],
+ ['threads_created', 'created', 'incremental', 1, 1],
+ ['threads_failed', 'failed', 'incremental', 1, 1],
+ ['threads_limited', 'limited', 'incremental', 1, 1],
+ ['thread_queue_len', 'queue_len', 'incremental', 1, 1],
+ ['sess_queued', 'sess_queued', 'incremental', 1, 1]],
+ 'options': [None, 'Thread status', 'threads', 'Thread-related metrics', 'varnish.threads', 'line']},
+ 'uptime':
+ {'lines': [['uptime', None, 'absolute', 1, 1]],
+ 'options': [None, 'Varnish uptime', 'seconds', 'Uptime', 'varnish.uptime', 'line']}
+}
+
+DIRECTORIES = ['/bin/', '/usr/bin/', '/sbin/', '/usr/sbin/']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.varnish = [''.join([directory, 'varnishstat']) for directory in DIRECTORIES
+ if is_executable(''.join([directory, 'varnishstat']), X_OK)][0]
+ except IndexError:
+ self.varnish = False
+ self.rgx_all = compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
+ # Could be
+ # VBE.boot.super_backend.pipe_hdrbyte (new)
+ # or
+ # VBE.default2(127.0.0.2,,81).bereq_bodybytes (old)
+ # Regex result: [('super_backend', 'beresp_hdrbytes', '0'), ('super_backend', 'beresp_bodybytes', '0')]
+ self.rgx_bck = (compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)'),
+ compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)'))
+ self.cache_prev = list()
+
+ def check(self):
+ # Can't start without the 'varnishstat' command
+ if not self.varnish:
+ self.error('\'varnishstat\' command was not found in %s or not executable by netdata' % DIRECTORIES)
+ return False
+
+ # If the command is present and we can execute it, we need to make sure that:
+ # 1. STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error('No output from \'varnishstat\' (not enough privileges?)')
+ return False
+
+ # 2. Output is parsable (list is not empty after regex findall)
+ is_parsable = self.rgx_all.findall(reply)
+ if not is_parsable:
+ self.error('Can\'t parse output...')
+ return False
+
+ # We need to find the right regex for backend parse
+ self.backend_list = self.rgx_bck[0].findall(reply)[::2]
+ if self.backend_list:
+ self.rgx_bck = self.rgx_bck[0]
+ else:
+ self.backend_list = self.rgx_bck[1].findall(reply)[::2]
+ self.rgx_bck = self.rgx_bck[1]
+
+ # We are about to start!
+ self.create_charts()
+
+ self.info('Plugin was started successfully')
+ return True
+
+ def _get_raw_data(self):
+ try:
+ reply = Popen([self.varnish, '-1'], stdout=PIPE, stderr=PIPE, shell=False)
+ except OSError:
+ return None
+
+ raw_data = reply.communicate()[0]
+
+ if not raw_data:
+ return None
+
+ return raw_data
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ data_all = self.rgx_all.findall(raw_data)
+ data_backend = self.rgx_bck.findall(raw_data)
+
+ if not data_all:
+ return None
+
+ # 1. ALL data from 'varnishstat -1'. t = type (MAIN, MEMPOOL, etc.)
+ to_netdata = {k: int(v) for t, k, v in data_all}
+
+ # 2. ADD backend statistics
+ to_netdata.update({'_'.join([n, k]): int(v) for n, k, v in data_backend})
+
+ # 3. ADD additional keys to dict
+ # 3.1 Cache hit/miss/hitpass OVERALL in percent
+ cache_summary = sum([to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0),
+ to_netdata.get('cache_hitpass', 0)])
+ to_netdata['cache_hit_perc'] = find_percent(to_netdata.get('cache_hit', 0), cache_summary, 10000)
+ to_netdata['cache_miss_perc'] = find_percent(to_netdata.get('cache_miss', 0), cache_summary, 10000)
+ to_netdata['cache_hitpass_perc'] = find_percent(to_netdata.get('cache_hitpass', 0), cache_summary, 10000)
+
+ # 3.2 Cache hit/miss/hitpass CURRENT in percent
+ if self.cache_prev:
+ cache_summary = sum([to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0),
+ to_netdata.get('cache_hitpass', 0)]) - sum(self.cache_prev)
+ to_netdata['cache_hit_cperc'] = find_percent(to_netdata.get('cache_hit', 0) - self.cache_prev[0], cache_summary, 10000)
+ to_netdata['cache_miss_cperc'] = find_percent(to_netdata.get('cache_miss', 0) - self.cache_prev[1], cache_summary, 10000)
+ to_netdata['cache_hitpass_cperc'] = find_percent(to_netdata.get('cache_hitpass', 0) - self.cache_prev[2], cache_summary, 10000)
+ else:
+ to_netdata['cache_hit_cperc'] = 0
+ to_netdata['cache_miss_cperc'] = 0
+ to_netdata['cache_hitpass_cperc'] = 0
+
+ self.cache_prev = [to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0), to_netdata.get('cache_hitpass', 0)]
+
+ # 3.3 Problems summary chart
+ for elem in ['backend_busy', 'backend_unhealthy', 'esi_errors', 'esi_warnings', 'losthdr', 'sess_drop', 'sc_pipe_overflow',
+ 'sess_fail', 'sess_pipe_overflow', 'threads_destroyed', 'threads_failed', 'threads_limited', 'thread_queue_len']:
+ if to_netdata.get(elem) is not None:
+ to_netdata[''.join([elem, '_b'])] = to_netdata.get(elem)
+
+ # Ready steady go!
+ return to_netdata
+
+ def create_charts(self):
+ # If 'all_charts' is true, ALL charts are displayed. If not, only the defaults plus 'extra_charts'.
+ #if self.configuration.get('all_charts'):
+ # self.order = EXTRA_ORDER
+ #else:
+ # try:
+ # extra_charts = list(filter(lambda chart: chart in EXTRA_ORDER, self.extra_charts.split()))
+ # except (AttributeError, NameError, ValueError):
+ # self.error('Extra charts disabled.')
+ # extra_charts = []
+
+ self.order = ORDER[:]
+ #self.order.extend(extra_charts)
+
+ # Create static charts
+ #self.definitions = {chart: values for chart, values in CHARTS.items() if chart in self.order}
+ self.definitions = CHARTS
+
+ # Create dynamic backend charts
+ if self.backend_list:
+ for backend in self.backend_list:
+ self.order.insert(0, ''.join([backend[0], '_resp_stats']))
+ self.definitions.update({''.join([backend[0], '_resp_stats']): {
+ 'options': [None,
+ '%s response statistics' % backend[0].capitalize(),
+ "kilobit/s",
+ 'Backend response',
+ 'varnish.backend',
+ 'area'],
+ 'lines': [[''.join([backend[0], '_beresp_hdrbytes']),
+ 'header', 'incremental', 8, 1000],
+ [''.join([backend[0], '_beresp_bodybytes']),
+ 'body', 'incremental', -8, 1000]]}})
+
+
+def find_percent(value1, value2, multiply):
+ # If value2 is 0 return 0
+ if not value2:
+ return 0
+ else:
+ return round(float(value1) / float(value2) * multiply)
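find_percent scales by 'multiply' so percentages travel through netdata's integer pipeline with fixed precision: with the 10000 multiplier used above and the divisor of 100 in the chart lines, two decimal places survive. For hypothetical counters:

    print(find_percent(4950, 5000, 10000))  # 9900, rendered as 99.00%
    print(find_percent(0, 0, 10000))        # 0: guard against an empty cache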