author     Lennart Weller <lhw@ring0.de>  2017-09-17 22:17:45 +0000
committer  Lennart Weller <lhw@ring0.de>  2017-09-17 22:17:45 +0000
commit     7ee3962eaca4214264964ae32c86de457a90e382 (patch)
tree       5a784485351593ac6d74fa9fd9bbd8fe7c9c3fe0
parent     maintscript eludes me (diff)
parent     New upstream version 1.8.0+dfsg (diff)
download   netdata-7ee3962eaca4214264964ae32c86de457a90e382.tar.xz
           netdata-7ee3962eaca4214264964ae32c86de457a90e382.zip

Updated version 1.8.0+dfsg from 'upstream/1.8.0+dfsg'
with Debian dir 412de09d9bca38fe00146ef090f9e53f76493882
-rw-r--r--  .codacy.yml | 15
-rw-r--r--  .codeclimate.yml | 5
-rw-r--r--  .gitignore | 8
-rw-r--r--  ChangeLog | 6
-rw-r--r--  LICENSE-REDISTRIBUTED.md | 10
-rw-r--r--  Makefile.am | 30
-rw-r--r--  Makefile.in | 30
-rw-r--r--  README.md | 53
-rw-r--r--  conf.d/Makefile.am | 35
-rw-r--r--  conf.d/Makefile.in | 85
-rw-r--r--  conf.d/health.d/cpu.conf | 8
-rw-r--r--  conf.d/health.d/disks.conf | 16
-rw-r--r--  conf.d/health.d/entropy.conf | 2
-rw-r--r--  conf.d/health.d/ipc.conf | 6
-rw-r--r--  conf.d/health.d/memory.conf | 8
-rw-r--r--  conf.d/health.d/net.conf | 28
-rw-r--r--  conf.d/health.d/netfilter.conf | 6
-rw-r--r--  conf.d/health.d/qos.conf | 4
-rw-r--r--  conf.d/health.d/ram.conf | 6
-rw-r--r--  conf.d/health.d/softnet.conf | 7
-rw-r--r--  conf.d/health.d/swap.conf | 8
-rw-r--r--  conf.d/health.d/tcp_resets.conf | 13
-rw-r--r--  conf.d/health.d/udp_errors.conf | 9
-rw-r--r--  conf.d/health_alarm_notify.conf | 16
-rw-r--r--  conf.d/node.d/fronius.conf.md | 2
-rw-r--r--  conf.d/node.d/stiebeleltron.conf.md | 453
-rw-r--r--  conf.d/python.d.conf | 1
-rw-r--r--  conf.d/python.d/chrony.conf | 72
-rw-r--r--  conf.d/python.d/postgres.conf | 3
-rw-r--r--  conf.d/python.d/tomcat.conf | 5
-rw-r--r--  conf.d/python.d/web_log.conf | 2
-rw-r--r--  config.h.in | 3
-rw-r--r--  configs.signatures | 28
-rwxr-xr-x  configure | 327
-rw-r--r--  configure.ac | 48
-rw-r--r--  contrib/debian/changelog | 4
-rwxr-xr-x  coverity-scan.sh | 4
-rw-r--r--  diagrams/Makefile.am | 13
-rw-r--r--  diagrams/Makefile.in | 467
-rw-r--r--  diagrams/netdata-overview.xml | 2
-rw-r--r--  installer/functions.sh | 4
-rw-r--r--  makeself/Makefile.am | 25
-rw-r--r--  makeself/Makefile.in | 479
-rwxr-xr-x  makeself/build-x86_64-static.sh | 16
-rwxr-xr-x  makeself/build.sh | 24
-rwxr-xr-x  makeself/functions.sh | 3
-rwxr-xr-x  makeself/install-alpine-packages.sh (renamed from makeself/setup-x86_64-static.sh) | 0
-rwxr-xr-x  makeself/install-or-update.sh | 8
-rwxr-xr-x  makeself/jobs/50-bash-4.4.install.sh | 5
-rwxr-xr-x  makeself/jobs/50-curl-7.53.1.install.sh | 5
-rwxr-xr-x  makeself/jobs/50-fping-4.0.install.sh | 5
-rwxr-xr-x  makeself/jobs/70-netdata-git.install.sh | 15
-rwxr-xr-x  makeself/run-all-jobs.sh | 7
-rwxr-xr-x  netdata-installer.sh | 24
-rw-r--r--  netdata.spec | 9
-rw-r--r--  netdata.spec.in | 3
-rw-r--r--  node.d/Makefile.am | 17
-rw-r--r--  node.d/Makefile.in | 67
-rw-r--r--  node.d/README.md | 55
-rw-r--r--  node.d/fronius.node.js | 311
-rw-r--r--  node.d/node_modules/asn1-ber.js | 6
-rw-r--r--  node.d/node_modules/asn1.js | 20
-rw-r--r--  node.d/node_modules/ber/errors.js | 13
-rw-r--r--  node.d/node_modules/ber/index.js | 27
-rw-r--r--  node.d/node_modules/ber/reader.js | 261
-rw-r--r--  node.d/node_modules/ber/types.js | 36
-rw-r--r--  node.d/node_modules/ber/writer.js | 316
-rw-r--r--  node.d/node_modules/lib/ber/errors.js | 9
-rw-r--r--  node.d/node_modules/lib/ber/index.js | 17
-rw-r--r--  node.d/node_modules/lib/ber/reader.js | 269
-rw-r--r--  node.d/node_modules/lib/ber/types.js | 34
-rw-r--r--  node.d/node_modules/lib/ber/writer.js | 317
-rw-r--r--  node.d/node_modules/net-snmp.js | 2
-rw-r--r--  node.d/node_modules/netdata.js | 8
-rw-r--r--  node.d/snmp.node.js | 37
-rw-r--r--  node.d/stiebeleltron.node.js | 196
-rw-r--r--  package.json | 23
-rwxr-xr-x  plugins.d/alarm-notify.sh | 221
-rwxr-xr-x  plugins.d/charts.d.plugin | 6
-rwxr-xr-x  plugins.d/fping.plugin | 1
-rw-r--r--  python.d/Makefile.am | 66
-rw-r--r--  python.d/Makefile.in | 285
-rw-r--r--  python.d/README.md | 32
-rw-r--r--  python.d/chrony.chart.py | 105
-rw-r--r--  python.d/cpufreq.chart.py | 30
-rw-r--r--  python.d/cpuidle.chart.py | 30
-rw-r--r--  python.d/dovecot.chart.py | 4
-rw-r--r--  python.d/elasticsearch.chart.py | 417
-rw-r--r--  python.d/fail2ban.chart.py | 9
-rw-r--r--  python.d/haproxy.chart.py | 16
-rw-r--r--  python.d/mdstat.chart.py | 2
-rw-r--r--  python.d/mysql.chart.py | 5
-rw-r--r--  python.d/postgres.chart.py | 45
-rw-r--r--  python.d/python_modules/base.py | 171
-rw-r--r--  python.d/python_modules/urllib3/__init__.py | 97
-rw-r--r--  python.d/python_modules/urllib3/_collections.py | 314
-rw-r--r--  python.d/python_modules/urllib3/connection.py | 373
-rw-r--r--  python.d/python_modules/urllib3/connectionpool.py | 899
-rw-r--r--  python.d/python_modules/urllib3/contrib/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/bindings.py | 590
-rw-r--r--  python.d/python_modules/urllib3/contrib/_securetransport/low_level.py | 343
-rw-r--r--  python.d/python_modules/urllib3/contrib/appengine.py | 296
-rw-r--r--  python.d/python_modules/urllib3/contrib/ntlmpool.py | 112
-rw-r--r--  python.d/python_modules/urllib3/contrib/pyopenssl.py | 457
-rw-r--r--  python.d/python_modules/urllib3/contrib/securetransport.py | 807
-rw-r--r--  python.d/python_modules/urllib3/contrib/socks.py | 188
-rw-r--r--  python.d/python_modules/urllib3/exceptions.py | 246
-rw-r--r--  python.d/python_modules/urllib3/fields.py | 178
-rw-r--r--  python.d/python_modules/urllib3/filepost.py | 94
-rw-r--r--  python.d/python_modules/urllib3/packages/__init__.py | 5
-rw-r--r--  python.d/python_modules/urllib3/packages/backports/__init__.py | 0
-rw-r--r--  python.d/python_modules/urllib3/packages/backports/makefile.py | 53
-rw-r--r--  python.d/python_modules/urllib3/packages/ordered_dict.py | 259
-rw-r--r--  python.d/python_modules/urllib3/packages/six.py | 868
-rw-r--r--  python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py | 19
-rw-r--r--  python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py | 157
-rw-r--r--  python.d/python_modules/urllib3/poolmanager.py | 440
-rw-r--r--  python.d/python_modules/urllib3/request.py | 148
-rw-r--r--  python.d/python_modules/urllib3/response.py | 622
-rw-r--r--  python.d/python_modules/urllib3/util/__init__.py | 54
-rw-r--r--  python.d/python_modules/urllib3/util/connection.py | 130
-rw-r--r--  python.d/python_modules/urllib3/util/request.py | 118
-rw-r--r--  python.d/python_modules/urllib3/util/response.py | 81
-rw-r--r--  python.d/python_modules/urllib3/util/retry.py | 401
-rw-r--r--  python.d/python_modules/urllib3/util/selectors.py | 581
-rw-r--r--  python.d/python_modules/urllib3/util/ssl_.py | 337
-rw-r--r--  python.d/python_modules/urllib3/util/timeout.py | 242
-rw-r--r--  python.d/python_modules/urllib3/util/url.py | 230
-rw-r--r--  python.d/python_modules/urllib3/util/wait.py | 40
-rw-r--r--  python.d/rabbitmq.chart.py | 119
-rw-r--r--  python.d/redis.chart.py | 36
-rw-r--r--  python.d/tomcat.chart.py | 144
-rw-r--r--  python.d/web_log.chart.py | 57
-rw-r--r--  src/Makefile.am | 22
-rw-r--r--  src/Makefile.in | 106
-rw-r--r--  src/appconfig.c | 2
-rw-r--r--  src/apps_plugin.c | 2
-rw-r--r--  src/cgroup-network.c | 340
-rw-r--r--  src/common.c | 15
-rw-r--r--  src/common.h | 4
-rw-r--r--  src/daemon.c | 101
-rw-r--r--  src/daemon.h | 5
-rw-r--r--  src/freebsd_getifaddrs.c | 36
-rw-r--r--  src/freebsd_sysctl.c | 72
-rw-r--r--  src/freeipmi_plugin.c | 2
-rw-r--r--  src/global_statistics.c | 4
-rw-r--r--  src/health.c | 53
-rw-r--r--  src/health_config.c | 60
-rw-r--r--  src/health_log.c | 22
-rw-r--r--  src/inlined.h | 38
-rw-r--r--  src/macos_fw.c | 4
-rw-r--r--  src/macos_sysctl.c | 6
-rw-r--r--  src/main.c | 77
-rw-r--r--  src/plugin_freebsd.c | 1
-rw-r--r--  src/plugin_freebsd.h | 1
-rw-r--r--  src/plugin_proc.h | 4
-rw-r--r--  src/plugin_proc_diskspace.c | 8
-rw-r--r--  src/plugin_tc.c | 2
-rw-r--r--  src/plugins_d.c | 58
-rw-r--r--  src/popen.c | 34
-rw-r--r--  src/proc_diskstats.c | 7
-rw-r--r--  src/proc_interrupts.c | 92
-rw-r--r--  src/proc_loadavg.c | 62
-rw-r--r--  src/proc_meminfo.c | 288
-rw-r--r--  src/proc_net_dev.c | 426
-rw-r--r--  src/proc_net_ip_vs_stats.c | 4
-rw-r--r--  src/proc_net_netstat.c | 509
-rw-r--r--  src/proc_net_snmp.c | 2
-rw-r--r--  src/proc_net_snmp6.c | 12
-rw-r--r--  src/proc_softirqs.c | 107
-rw-r--r--  src/proc_stat.c | 573
-rw-r--r--  src/proc_sys_kernel_random_entropy_avail.c | 23
-rw-r--r--  src/proc_uptime.c | 29
-rw-r--r--  src/proc_vmstat.c | 131
-rw-r--r--  src/rrd.h | 3
-rw-r--r--  src/rrd2json.c | 10
-rw-r--r--  src/rrddim.c | 4
-rw-r--r--  src/rrdpush.c | 160
-rw-r--r--  src/rrdset.c | 5
-rw-r--r--  src/signals.c | 168
-rw-r--r--  src/signals.h | 10
-rw-r--r--  src/socket.c | 151
-rw-r--r--  src/socket.h | 5
-rw-r--r--  src/statsd.c | 49
-rw-r--r--  src/storage_number.c | 76
-rw-r--r--  src/storage_number.h | 16
-rw-r--r--  src/sys_devices_system_edac_mc.c | 61
-rw-r--r--  src/sys_fs_cgroup.c | 136
-rw-r--r--  src/sys_kernel_mm_ksm.c | 173
-rw-r--r--  src/unit_test.c | 60
-rw-r--r--  src/unit_test.h | 1
-rw-r--r--  src/web_api_v1.c | 10
-rw-r--r--  src/web_buffer.c | 43
-rw-r--r--  src/web_buffer.h | 1
-rw-r--r--  src/web_buffer_svg.c | 11
-rw-r--r--  src/web_client.c | 103
-rw-r--r--  src/web_client.h | 77
-rw-r--r--  src/web_server.c | 28
-rw-r--r--  system/netdata-init-d.in | 2
-rw-r--r--  system/netdata-openrc.in | 2
-rw-r--r--  system/netdata.service.in | 30
-rw-r--r--  tests/Makefile.am | 18
-rw-r--r--  tests/Makefile.in | 472
-rw-r--r--  tests/README.md | 135
-rw-r--r--  tests/node.d/fronius.chart.spec.js | 161
-rw-r--r--  tests/node.d/fronius.parse.spec.js | 305
-rw-r--r--  tests/node.d/fronius.process.spec.js | 74
-rw-r--r--  tests/node.d/fronius.validation.spec.js | 154
-rw-r--r--  tests/web/easypiechart.chart.spec.js | 39
-rw-r--r--  tests/web/easypiechart.percentage.spec.js | 142
-rw-r--r--  tests/web/fixtures/easypiechart.chart.fixture1.html | 6
-rw-r--r--  tests/web/karma.conf.js | 110
-rw-r--r--  tests/web/lib/jasmine-jquery.js | 841
-rw-r--r--  web/dashboard.css | 1
-rw-r--r--  web/dashboard.html | 2
-rw-r--r--  web/dashboard.js | 73
-rw-r--r--  web/dashboard.slate.css | 1
-rw-r--r--  web/dashboard_info.js | 189
-rw-r--r--  web/goto-host-from-alarm.html | 2
-rw-r--r--  web/index.html | 12
-rw-r--r--  web/registry.html | 2
-rw-r--r--  web/tv.html | 2
-rw-r--r--  web/version.txt | 2
224 files changed, 20601 insertions, 2982 deletions
diff --git a/.codacy.yml b/.codacy.yml
new file mode 100644
index 000000000..0e3f44365
--- /dev/null
+++ b/.codacy.yml
@@ -0,0 +1,15 @@
+---
+exclude_paths:
+ - python.d/python_modules/pyyaml2/**
+ - python.d/python_modules/pyyaml3/**
+ - python.d/python_modules/urllib3/**
+ - python.d/python_modules/lm_sensors.py
+ - web/css/**
+ - web/lib/**
+ - web/old/**
+ - node.d/node_modules/lib/**
+ - node.d/node_modules/asn1-ber.js
+ - node.d/node_modules/net-snmp.js
+ - node.d/node_modules/pixl-xml.js
+ - node.d/node_modules/extend.js
+ - tests/**
diff --git a/.codeclimate.yml b/.codeclimate.yml
index 02b452647..c3d4daa45 100644
--- a/.codeclimate.yml
+++ b/.codeclimate.yml
@@ -63,8 +63,9 @@ exclude_paths:
- web/old/
- python.d/python_modules/pyyaml2/
- python.d/python_modules/pyyaml3/
-- node.d/node_modules/ber/
-- node.d/node_modules/asn1.js
+- python.d/python_modules/urllib3/
+- node.d/node_modules/lib/
+- node.d/node_modules/asn1-ber.js
- node.d/node_modules/extend.js
- node.d/node_modules/pixl-xml.js
- node.d/node_modules/net-snmp.js
diff --git a/.gitignore b/.gitignore
index 00c7d6d68..40b6b1d3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,10 @@ web/gadget.xml
web/index_new.html
web/version.txt
+# related to karma/javascript/node
+node_modules/
+coverage/
+
system/netdata-lsb
system/netdata-openrc
system/netdata-init-d
@@ -103,3 +107,7 @@ diagrams/*.atxt
diagrams/plantuml.jar
netdata.cppcheck
+
+profile/statsd-stress
+src/cgroup-network
+vgcore.*
diff --git a/ChangeLog b/ChangeLog
index d3c20b804..5d74b8aa3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+netdata (1.8.0) - 2017-09-17
+
+	This is mainly a bugfix release.
+	Please check the full changelog on GitHub.
+
+
netdata (1.7.0) - 2017-07-16
* netdata is still spreading fast
diff --git a/LICENSE-REDISTRIBUTED.md b/LICENSE-REDISTRIBUTED.md
index 79891e5a3..513d53f5d 100644
--- a/LICENSE-REDISTRIBUTED.md
+++ b/LICENSE-REDISTRIBUTED.md
@@ -127,10 +127,11 @@ connectivity is not available.
[MIT License](https://github.com/stephenwvickers/node-net-snmp)
-- [node-asn1](https://github.com/mcavage/node-asn1)
+- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber)
+ Copyright 2017, Stephen Vickers
Copyright 2011, Mark Cavage
- [MIT License](https://github.com/mcavage/node-asn1)
+ [MIT License](https://github.com/stephenwvickers/node-asn1-ber)
- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
@@ -148,3 +149,8 @@ connectivity is not available.
Copyright 2006, Kirill Simonov
[MIT License](http://pyyaml.org)
+- [urllib3](https://github.com/shazow/urllib3)
+
+ Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt)
+ [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt)
+
diff --git a/Makefile.am b/Makefile.am
index 7d5bc57f8..2ea9e40f9 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,6 +18,7 @@ MAINTAINERCLEANFILES= \
EXTRA_DIST = \
.gitignore \
+ .codacy.yml \
.codeclimate.yml \
.csslintrc \
.eslintignore \
@@ -42,12 +43,13 @@ EXTRA_DIST = \
LICENSE-REDISTRIBUTED.md \
COPYING \
autogen.sh \
- tests/stress.sh \
$(NULL)
SUBDIRS = \
charts.d \
conf.d \
+ diagrams \
+ makeself \
node.d \
plugins.d \
python.d \
@@ -55,45 +57,23 @@ SUBDIRS = \
system \
web \
contrib \
+ tests \
$(NULL)
dist_noinst_DATA= \
- diagrams/config.puml \
- diagrams/registry.puml \
- diagrams/netdata-for-ephemeral-nodes.xml \
- diagrams/netdata-proxies-example.xml \
- diagrams/netdata-overview.xml \
configs.signatures \
Dockerfile \
netdata.spec \
+ package.json \
$(NULL)
# until integrated within build
# should be proper init.d/openrc/systemd usable
dist_noinst_SCRIPTS= \
- diagrams/build.sh \
coverity-scan.sh \
docker-build.sh \
kickstart.sh \
kickstart-static64.sh \
netdata-installer.sh \
installer/functions.sh \
- makeself/build.sh \
- makeself/makeself.sh \
- makeself/makeself-license.txt \
- makeself/setup-x86_64-static.sh \
- makeself/post-installer.sh \
- makeself/jobs/10-prepare-destination.install.sh \
- makeself/jobs/50-curl-7.53.1.install.sh \
- makeself/jobs/50-bash-4.4.install.sh \
- makeself/jobs/50-fping-4.0.install.sh \
- makeself/jobs/70-netdata-git.install.sh \
- makeself/jobs/99-makeself.install.sh \
- makeself/run-all-jobs.sh \
- makeself/install-or-update.sh \
- makeself/build-x86_64-static.sh \
- makeself/makeself-header.sh \
- makeself/makeself-help-header.txt \
- makeself/makeself.lsm \
- makeself/functions.sh \
$(NULL)
diff --git a/Makefile.in b/Makefile.in
index aae24649b..5abcfe6f0 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -367,6 +367,7 @@ MAINTAINERCLEANFILES = \
EXTRA_DIST = \
.gitignore \
+ .codacy.yml \
.codeclimate.yml \
.csslintrc \
.eslintignore \
@@ -391,12 +392,13 @@ EXTRA_DIST = \
LICENSE-REDISTRIBUTED.md \
COPYING \
autogen.sh \
- tests/stress.sh \
$(NULL)
SUBDIRS = \
charts.d \
conf.d \
+ diagrams \
+ makeself \
node.d \
plugins.d \
python.d \
@@ -404,48 +406,26 @@ SUBDIRS = \
system \
web \
contrib \
+ tests \
$(NULL)
dist_noinst_DATA = \
- diagrams/config.puml \
- diagrams/registry.puml \
- diagrams/netdata-for-ephemeral-nodes.xml \
- diagrams/netdata-proxies-example.xml \
- diagrams/netdata-overview.xml \
configs.signatures \
Dockerfile \
netdata.spec \
+ package.json \
$(NULL)
# until integrated within build
# should be proper init.d/openrc/systemd usable
dist_noinst_SCRIPTS = \
- diagrams/build.sh \
coverity-scan.sh \
docker-build.sh \
kickstart.sh \
kickstart-static64.sh \
netdata-installer.sh \
installer/functions.sh \
- makeself/build.sh \
- makeself/makeself.sh \
- makeself/makeself-license.txt \
- makeself/setup-x86_64-static.sh \
- makeself/post-installer.sh \
- makeself/jobs/10-prepare-destination.install.sh \
- makeself/jobs/50-curl-7.53.1.install.sh \
- makeself/jobs/50-bash-4.4.install.sh \
- makeself/jobs/50-fping-4.0.install.sh \
- makeself/jobs/70-netdata-git.install.sh \
- makeself/jobs/99-makeself.install.sh \
- makeself/run-all-jobs.sh \
- makeself/install-or-update.sh \
- makeself/build-x86_64-static.sh \
- makeself/makeself-header.sh \
- makeself/makeself-help-header.txt \
- makeself/makeself.lsm \
- makeself/functions.sh \
$(NULL)
all: config.h
diff --git a/README.md b/README.md
index 15102b1d7..f33010e59 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# netdata [![Build Status](https://travis-ci.org/firehol/netdata.svg?branch=master)](https://travis-ci.org/firehol/netdata) [![Coverity Scan Build Status](https://scan.coverity.com/projects/9140/badge.svg)](https://scan.coverity.com/projects/firehol-netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=firehol/netdata&amp;utm_campaign=Badge_Grade) [![Code Climate](https://codeclimate.com/github/firehol/netdata/badges/gpa.svg)](https://codeclimate.com/github/firehol/netdata) [![license](https://img.shields.io/github/license/firehol/netdata.svg)](LICENSE)
+# netdata [![Build Status](https://travis-ci.org/firehol/netdata.svg?branch=master)](https://travis-ci.org/firehol/netdata) [![Coverity Scan Build Status](https://scan.coverity.com/projects/9140/badge.svg)](https://scan.coverity.com/projects/firehol-netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=firehol/netdata&amp;utm_campaign=Badge_Grade) [![Code Climate](https://codeclimate.com/github/firehol/netdata/badges/gpa.svg)](https://codeclimate.com/github/firehol/netdata) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
> *New to netdata? Here is a live demo: [http://my-netdata.io](http://my-netdata.io)*
**netdata** is a system for **distributed real-time performance and health monitoring**.
@@ -34,14 +34,18 @@ Netdata is featured at <b><a href="https://octoverse.github.com/" target="_blank
<a href="https://octoverse.github.com/" target="_blank"><img src="https://cloud.githubusercontent.com/assets/2662304/21743260/23ebe62c-d507-11e6-80c0-76b95f53e464.png"/></a>
</p>
-`Mar 20th, 2017` - **[netdata v1.6.0 released!](https://github.com/firehol/netdata/releases)**
-
- - central netdata is here! headless collectors, proxies, streaming of metrics, etc.
- - [monitoring ephemeral nodes (auto-scaled VMs)](https://github.com/firehol/netdata/wiki/monitoring-ephemeral-nodes)
- - [monitoring ephemeral containers and VM guests](https://github.com/firehol/netdata/wiki/monitoring-ephemeral-containers)
- - [monitoring web servers](https://github.com/firehol/netdata/wiki/The-spectacles-of-a-web-server-log-file)
- - apps.plugin ported for FreeBSD
- - [monitoring IPMI](https://github.com/firehol/netdata/wiki/monitoring-IPMI)
+`Jul 16th, 2017` - **[netdata v1.7.0 released!](https://github.com/firehol/netdata/releases)**
+
+ - netdata is now a fully featured **statsd** server - [read more here](https://github.com/firehol/netdata/wiki/statsd)
+ - netdata now monitors **ZFS** of Linux and FreeBSD
+ - netdata now monitors **ElasticSearch**
+ - netdata now monitors **RabbitMQ**
+ - netdata now monitors **Go applications** (via `expvar`) - [read more here](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications)
+ - netdata now monitors **squid logs**
+ - netdata now monitors **samba**
+ - several **backends** improvements and enhancements, including **metrics filtering**
+ - **prometheus** backend support has been rewritten
+ - alarm notifications now support **custom hooks** to execute shell commands in parallel with all other notification methods
- dozens of new and improved plugins
- dozens of new and improved alarms
- dozens more improvements and performance optimizations
@@ -56,50 +60,50 @@ Netdata is featured at <b><a href="https://octoverse.github.com/" target="_blank
- **Stunning interactive bootstrap dashboards**<br/>
mouse and touch friendly, in 2 themes: dark, light
-
+
- **Amazingly fast**<br/>
responds to all queries in less than 0.5 ms per metric,
even on low-end hardware
-
+
- **Highly efficient**<br/>
collects thousands of metrics per server per second,
with just 1% CPU utilization of a single core, a few MB of RAM and no disk I/O at all
-
+
- **Sophisticated alarming**<br/>
hundreds of alarms, **out of the box**!<br/>
supports dynamic thresholds, hysteresis, alarm templates,
multiple role-based notification methods (such as email, slack.com,
pushover.net, pushbullet.com, telegram.org, twilio.com, messagebird.com)
-
+
- **Extensible**<br/>
you can monitor anything you can get a metric for,
using its Plugin API (anything can be a netdata plugin,
BASH, python, perl, node.js, java, Go, ruby, etc)
-
+
- **Embeddable**<br/>
it can run anywhere a Linux kernel runs (even IoT)
and its charts can be embedded on your web pages too
-
+
- **Customizable**<br/>
custom dashboards can be built using simple HTML (no javascript necessary)
-
+
- **Zero configuration**<br/>
auto-detects everything, it can collect up to 5000 metrics
per server out of the box
-
+
- **Zero dependencies**<br/>
it is even its own web server, for its static web files and its web API
-
+
- **Zero maintenance**<br/>
you just run it, it does the rest
-
+
- **scales to infinity**<br/>
requiring minimal central resources
-
+
- **several operating modes**<br/>
autonomous host monitoring, headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations.
Each node may have different metrics retention policy and run with or without health monitoring.
-
+
- **time-series back-ends supported**<br/>
can archive its metrics on `graphite`, `opentsdb`, `prometheus`, json document DBs, in the same or lower detail
(lower: to prevent it from congesting these servers due to the amount of data collected)
@@ -310,6 +314,13 @@ It should run on **any Linux** system (including IoT). It has been tested on:
---
+## Interaction with netdata
+
+After installation, you can interact with netdata through its **[CLI](https://github.com/firehol/netdata/wiki/Command-Line-Options)** and web dashboards.
+The dashboard's default port is 19999. To access the web dashboard on localhost, use: http://localhost:19999
+
+---
+
## Documentation
Check the **[netdata wiki](https://github.com/firehol/netdata/wiki)**.
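The new "Interaction with netdata" section can be sanity-checked against the agent's REST API; here is a minimal sketch, assuming a local netdata listening on the default port 19999 (`/api/v1/charts` is the chart-listing endpoint the dashboard itself queries):

```python
import json
import urllib.request

# Ask a local netdata agent which charts it currently exposes
# (assumption: agent running on localhost with the default port).
with urllib.request.urlopen('http://localhost:19999/api/v1/charts') as resp:
    charts = json.load(resp)

print(charts['hostname'], '-', len(charts['charts']), 'charts')
```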
diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am
index 4cbecb56a..7a0786678 100644
--- a/conf.d/Makefile.am
+++ b/conf.d/Makefile.am
@@ -21,6 +21,7 @@ dist_nodeconfig_DATA = \
node.d/named.conf.md \
node.d/sma_webbox.conf.md \
node.d/snmp.conf.md \
+ node.d/stiebeleltron.conf.md \
$(NULL)
pythonconfigdir=$(configdir)/python.d
@@ -28,6 +29,7 @@ dist_pythonconfig_DATA = \
python.d/apache.conf \
python.d/apache_cache.conf \
python.d/bind_rndc.conf \
+ python.d/chrony.conf \
python.d/cpufreq.conf \
python.d/dns_query_time.conf \
python.d/dovecot.conf \
@@ -69,45 +71,40 @@ dist_healthconfig_DATA = \
health.d/apache.conf \
health.d/backend.conf \
health.d/bind_rndc.conf \
+ health.d/cpu.conf \
+ health.d/disks.conf \
health.d/elasticsearch.conf \
+ health.d/entropy.conf \
health.d/fping.conf \
health.d/haproxy.conf \
+ health.d/ipc.conf \
health.d/ipfs.conf \
health.d/ipmi.conf \
health.d/isc_dhcpd.conf \
health.d/lighttpd.conf \
health.d/mdstat.conf \
health.d/memcached.conf \
+ health.d/memory.conf \
+ health.d/mongodb.conf \
health.d/mysql.conf \
health.d/named.conf \
- health.d/mongodb.conf \
- health.d/nginx.conf \
- health.d/postgres.conf \
- health.d/redis.conf \
- health.d/retroshare.conf \
- health.d/squid.conf \
- health.d/varnish.conf \
- health.d/web_log.conf \
- health.d/zfs.conf \
- $(NULL)
-
-if LINUX
-dist_healthconfig_DATA += \
- health.d/cpu.conf \
- health.d/disks.conf \
- health.d/entropy.conf \
- health.d/ipc.conf \
- health.d/memory.conf \
health.d/net.conf \
health.d/netfilter.conf \
+ health.d/nginx.conf \
+ health.d/postgres.conf \
health.d/qos.conf \
health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/retroshare.conf \
health.d/softnet.conf \
+ health.d/squid.conf \
health.d/swap.conf \
health.d/tcp_resets.conf \
health.d/udp_errors.conf \
+ health.d/varnish.conf \
+ health.d/web_log.conf \
+ health.d/zfs.conf \
$(NULL)
-endif LINUX
chartsconfigdir=$(configdir)/charts.d
dist_chartsconfig_DATA = \
diff --git a/conf.d/Makefile.in b/conf.d/Makefile.in
index 7a1e300e0..3d7084ed1 100644
--- a/conf.d/Makefile.in
+++ b/conf.d/Makefile.in
@@ -78,26 +78,10 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-@LINUX_TRUE@am__append_1 = \
-@LINUX_TRUE@ health.d/cpu.conf \
-@LINUX_TRUE@ health.d/disks.conf \
-@LINUX_TRUE@ health.d/entropy.conf \
-@LINUX_TRUE@ health.d/ipc.conf \
-@LINUX_TRUE@ health.d/memory.conf \
-@LINUX_TRUE@ health.d/net.conf \
-@LINUX_TRUE@ health.d/netfilter.conf \
-@LINUX_TRUE@ health.d/qos.conf \
-@LINUX_TRUE@ health.d/ram.conf \
-@LINUX_TRUE@ health.d/softnet.conf \
-@LINUX_TRUE@ health.d/swap.conf \
-@LINUX_TRUE@ health.d/tcp_resets.conf \
-@LINUX_TRUE@ health.d/udp_errors.conf \
-@LINUX_TRUE@ $(NULL)
-
subdir = conf.d
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(dist_chartsconfig_DATA) $(dist_config_DATA) \
- $(am__dist_healthconfig_DATA_DIST) $(dist_nodeconfig_DATA) \
+ $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \
$(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
@@ -164,21 +148,6 @@ am__installdirs = "$(DESTDIR)$(chartsconfigdir)" \
"$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" \
"$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" \
"$(DESTDIR)$(statsdconfigdir)"
-am__dist_healthconfig_DATA_DIST = health.d/apache.conf \
- health.d/backend.conf health.d/bind_rndc.conf \
- health.d/elasticsearch.conf health.d/fping.conf \
- health.d/haproxy.conf health.d/ipfs.conf health.d/ipmi.conf \
- health.d/isc_dhcpd.conf health.d/lighttpd.conf \
- health.d/mdstat.conf health.d/memcached.conf \
- health.d/mysql.conf health.d/named.conf health.d/mongodb.conf \
- health.d/nginx.conf health.d/postgres.conf health.d/redis.conf \
- health.d/retroshare.conf health.d/squid.conf \
- health.d/varnish.conf health.d/web_log.conf health.d/zfs.conf \
- health.d/cpu.conf health.d/disks.conf health.d/entropy.conf \
- health.d/ipc.conf health.d/memory.conf health.d/net.conf \
- health.d/netfilter.conf health.d/qos.conf health.d/ram.conf \
- health.d/softnet.conf health.d/swap.conf \
- health.d/tcp_resets.conf health.d/udp_errors.conf
DATA = $(dist_chartsconfig_DATA) $(dist_config_DATA) \
$(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \
$(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA)
@@ -351,6 +320,7 @@ dist_nodeconfig_DATA = \
node.d/named.conf.md \
node.d/sma_webbox.conf.md \
node.d/snmp.conf.md \
+ node.d/stiebeleltron.conf.md \
$(NULL)
pythonconfigdir = $(configdir)/python.d
@@ -358,6 +328,7 @@ dist_pythonconfig_DATA = \
python.d/apache.conf \
python.d/apache_cache.conf \
python.d/bind_rndc.conf \
+ python.d/chrony.conf \
python.d/cpufreq.conf \
python.d/dns_query_time.conf \
python.d/dovecot.conf \
@@ -394,17 +365,45 @@ dist_pythonconfig_DATA = \
$(NULL)
healthconfigdir = $(configdir)/health.d
-dist_healthconfig_DATA = health.d/apache.conf health.d/backend.conf \
- health.d/bind_rndc.conf health.d/elasticsearch.conf \
- health.d/fping.conf health.d/haproxy.conf health.d/ipfs.conf \
- health.d/ipmi.conf health.d/isc_dhcpd.conf \
- health.d/lighttpd.conf health.d/mdstat.conf \
- health.d/memcached.conf health.d/mysql.conf \
- health.d/named.conf health.d/mongodb.conf health.d/nginx.conf \
- health.d/postgres.conf health.d/redis.conf \
- health.d/retroshare.conf health.d/squid.conf \
- health.d/varnish.conf health.d/web_log.conf health.d/zfs.conf \
- $(NULL) $(am__append_1)
+dist_healthconfig_DATA = \
+ health.d/apache.conf \
+ health.d/backend.conf \
+ health.d/bind_rndc.conf \
+ health.d/cpu.conf \
+ health.d/disks.conf \
+ health.d/elasticsearch.conf \
+ health.d/entropy.conf \
+ health.d/fping.conf \
+ health.d/haproxy.conf \
+ health.d/ipc.conf \
+ health.d/ipfs.conf \
+ health.d/ipmi.conf \
+ health.d/isc_dhcpd.conf \
+ health.d/lighttpd.conf \
+ health.d/mdstat.conf \
+ health.d/memcached.conf \
+ health.d/memory.conf \
+ health.d/mongodb.conf \
+ health.d/mysql.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/netfilter.conf \
+ health.d/nginx.conf \
+ health.d/postgres.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/retroshare.conf \
+ health.d/softnet.conf \
+ health.d/squid.conf \
+ health.d/swap.conf \
+ health.d/tcp_resets.conf \
+ health.d/udp_errors.conf \
+ health.d/varnish.conf \
+ health.d/web_log.conf \
+ health.d/zfs.conf \
+ $(NULL)
+
chartsconfigdir = $(configdir)/charts.d
dist_chartsconfig_DATA = \
charts.d/apache.conf \
diff --git a/conf.d/health.d/cpu.conf b/conf.d/health.d/cpu.conf
index 30a714097..db6285561 100644
--- a/conf.d/health.d/cpu.conf
+++ b/conf.d/health.d/cpu.conf
@@ -1,6 +1,10 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
template: 10min_cpu_usage
on: system.cpu
+ os: linux
+ hosts: *
lookup: average -10m unaligned of user,system,softirq,irq,guest
units: %
every: 1m
@@ -12,6 +16,8 @@ template: 10min_cpu_usage
template: 10min_cpu_iowait
on: system.cpu
+ os: linux
+ hosts: *
lookup: average -10m unaligned of iowait
units: %
every: 1m
@@ -23,6 +29,8 @@ template: 10min_cpu_iowait
template: 20min_steal_cpu
on: system.cpu
+ os: linux
+ hosts: *
lookup: average -20m unaligned of steal
units: %
every: 5m
diff --git a/conf.d/health.d/disks.conf b/conf.d/health.d/disks.conf
index 9548f9ee0..63053491e 100644
--- a/conf.d/health.d/disks.conf
+++ b/conf.d/health.d/disks.conf
@@ -1,3 +1,7 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+
# -----------------------------------------------------------------------------
# low disk space
@@ -7,6 +11,8 @@
template: disk_space_usage
on: disk.space
+ os: linux
+ hosts: *
families: *
calc: $used * 100 / ($avail + $used)
units: %
@@ -19,6 +25,8 @@ families: *
template: disk_inode_usage
on: disk.inodes
+ os: linux
+ hosts: *
families: *
calc: $used * 100 / ($avail + $used)
units: %
@@ -43,6 +51,8 @@ families: *
template: disk_fill_rate
on: disk.space
+ os: linux
+ hosts: *
families: *
lookup: min -10m at -50m unaligned of avail
calc: ($this - $avail) / (($now - $after) / 3600)
@@ -57,6 +67,8 @@ families: *
template: out_of_disk_space_time
on: disk.space
+ os: linux
+ hosts: *
families: *
calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf)
units: hours
@@ -77,6 +89,8 @@ families: *
template: 10min_disk_utilization
on: disk.util
+ os: linux
+ hosts: *
families: *
lookup: average -10m unaligned
units: %
@@ -97,6 +111,8 @@ families: *
template: 10min_disk_backlog
on: disk.backlog
+ os: linux
+ hosts: *
families: *
lookup: average -10m unaligned
units: ms
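The two predictive alarms above work as a pair: `disk_fill_rate` turns an hour-old minimum of `$avail` into a GB-per-hour consumption rate, and `out_of_disk_space_time` divides the space left by that rate. A sketch of the arithmetic with assumed sample values:

```python
# Assumed sample values, in GB:
avail_an_hour_ago = 120.0  # $this: min of 'avail' in the -10m window at -50m
avail_now = 118.0          # $avail
hours_elapsed = 1.0        # ($now - $after) / 3600 for that lookup window

disk_fill_rate = (avail_an_hour_ago - avail_now) / hours_elapsed  # GB/hour
out_of_disk_space_time = (avail_now / disk_fill_rate
                          if disk_fill_rate > 0 else float('inf'))
print(f'{disk_fill_rate:.1f} GB/h -> full in ~{out_of_disk_space_time:.0f} h')
```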
diff --git a/conf.d/health.d/entropy.conf b/conf.d/health.d/entropy.conf
index 5dd8af502..66d44ec13 100644
--- a/conf.d/health.d/entropy.conf
+++ b/conf.d/health.d/entropy.conf
@@ -5,6 +5,8 @@
alarm: lowest_entropy
on: system.entropy
+ os: linux
+ hosts: *
lookup: min -10m unaligned
units: entries
every: 5m
diff --git a/conf.d/health.d/ipc.conf b/conf.d/health.d/ipc.conf
index ee7c4badd..03cf264d8 100644
--- a/conf.d/health.d/ipc.conf
+++ b/conf.d/health.d/ipc.conf
@@ -1,6 +1,10 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
alarm: semaphores_used
on: system.ipc_semaphores
+ os: linux
+ hosts: *
calc: $semaphores * 100 / $ipc.semaphores.max
units: %
every: 10s
@@ -12,6 +16,8 @@
alarm: semaphore_arrays_used
on: system.ipc_semaphore_arrays
+ os: linux
+ hosts: *
calc: $arrays * 100 / $ipc.semaphores.arrays.max
units: %
every: 10s
diff --git a/conf.d/health.d/memory.conf b/conf.d/health.d/memory.conf
index 3c904f6b1..4a0e6e522 100644
--- a/conf.d/health.d/memory.conf
+++ b/conf.d/health.d/memory.conf
@@ -1,6 +1,10 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
alarm: 1hour_ecc_memory_correctable
on: mem.ecc_ce
+ os: linux
+ hosts: *
lookup: sum -10m unaligned
units: errors
every: 1m
@@ -11,6 +15,8 @@
alarm: 1hour_ecc_memory_uncorrectable
on: mem.ecc_ue
+ os: linux
+ hosts: *
lookup: sum -10m unaligned
units: errors
every: 1m
@@ -21,6 +27,8 @@
alarm: 1hour_memory_hw_corrupted
on: mem.hwcorrupt
+ os: linux
+ hosts: *
calc: $HardwareCorrupted
units: MB
every: 10s
diff --git a/conf.d/health.d/net.conf b/conf.d/health.d/net.conf
index bd288817b..00a198612 100644
--- a/conf.d/health.d/net.conf
+++ b/conf.d/health.d/net.conf
@@ -1,4 +1,6 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
# -----------------------------------------------------------------------------
# dropped packets
@@ -8,48 +10,56 @@
template: inbound_packets_dropped
on: net.drops
+ os: linux
+ hosts: *
families: *
lookup: sum -10m unaligned absolute of inbound
units: packets
every: 1m
- warn: $this > 0
+ warn: $this >= 5
delay: down 1h multiplier 1.5 max 2h
info: interface inbound dropped packets in the last 10 minutes
to: sysadmin
template: outbound_packets_dropped
on: net.drops
+ os: linux
+ hosts: *
families: *
lookup: sum -10m unaligned absolute of outbound
units: packets
every: 1m
- warn: $this > 0
+ warn: $this >= 5
delay: down 1h multiplier 1.5 max 2h
info: interface outbound dropped packets in the last 10 minutes
to: sysadmin
template: inbound_packets_dropped_ratio
on: net.packets
+ os: linux
+ hosts: *
families: *
lookup: sum -10m unaligned absolute of received
calc: (($inbound_packets_dropped != nan AND $this > 0) ? ($inbound_packets_dropped * 100 / $this) : (0))
units: %
every: 1m
- warn: $this > 0.5
- crit: $this > 3
+ warn: $this >= 0.1
+ crit: $this >= 2
delay: down 1h multiplier 1.5 max 2h
info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes
to: sysadmin
template: outbound_packets_dropped_ratio
on: net.packets
+ os: linux
+ hosts: *
families: *
lookup: sum -10m unaligned absolute of sent
calc: (($outbound_packets_dropped != nan AND $this > 0) ? ($outbound_packets_dropped * 100 / $this) : (0))
units: %
every: 1m
- warn: $this > 0.5
- crit: $this > 3
+ warn: $this >= 0.1
+ crit: $this >= 2
delay: down 1h multiplier 1.5 max 2h
info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes
to: sysadmin
@@ -65,6 +75,8 @@ families: *
template: 10min_fifo_errors
on: net.fifo
+ os: linux
+ hosts: *
families: *
lookup: sum -10m unaligned absolute
units: errors
@@ -86,6 +98,8 @@ families: *
template: 1m_received_packets_rate
on: net.packets
+ os: linux
+ hosts: *
families: *
lookup: average -1m of received
units: packets
@@ -94,6 +108,8 @@ families: *
template: 10s_received_packets_storm
on: net.packets
+ os: linux
+ hosts: *
families: *
lookup: average -10s of received
calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
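The ratio templates above divide the 10-minute drop sum by the matching packet sum, and this release tightens the thresholds to warn at 0.1% and go critical at 2%. A quick check of the calc with assumed counts:

```python
inbound_packets_dropped = 12   # sum of drops over the last 10 minutes
received = 9600                # sum of received packets, same window

ratio = (inbound_packets_dropped * 100 / received) if received > 0 else 0
print(f'{ratio:.3f}%')         # 0.125% -> warn (>= 0.1), not critical (< 2)
```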
diff --git a/conf.d/health.d/netfilter.conf b/conf.d/health.d/netfilter.conf
index 3dd6a67b3..fa1732b33 100644
--- a/conf.d/health.d/netfilter.conf
+++ b/conf.d/health.d/netfilter.conf
@@ -1,6 +1,10 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
alarm: netfilter_last_collected_secs
on: netfilter.conntrack_sockets
+ os: linux
+ hosts: *
calc: $now - $last_collected_t
units: seconds ago
every: 10s
@@ -12,6 +16,8 @@
alarm: netfilter_conntrack_full
on: netfilter.conntrack_sockets
+ os: linux
+ hosts: *
lookup: max -10s unaligned of connections
calc: $this * 100 / $netfilter.conntrack.max
units: %
diff --git a/conf.d/health.d/qos.conf b/conf.d/health.d/qos.conf
index 9e5939fdc..7290d15ff 100644
--- a/conf.d/health.d/qos.conf
+++ b/conf.d/health.d/qos.conf
@@ -1,10 +1,14 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
# check if a QoS class is dropping packets
# the alarm is checked every 10 seconds
# and examines the last minute of data
#template: 10min_qos_packet_drops
# on: tc.qos_dropped
+# os: linux
+# hosts: *
# lookup: sum -10m unaligned absolute
# every: 30s
# warn: $this > 0
diff --git a/conf.d/health.d/ram.conf b/conf.d/health.d/ram.conf
index b99e5e226..8d0e8838d 100644
--- a/conf.d/health.d/ram.conf
+++ b/conf.d/health.d/ram.conf
@@ -1,12 +1,18 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
alarm: used_ram_to_ignore
on: system.ram
+ os: linux
+ hosts: *
calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
every: 10s
info: the amount of memory that is reported as used, but it is actually capable for resizing itself based on the system needs (eg. ZFS ARC)
alarm: ram_in_use
on: system.ram
+ os: linux
+ hosts: *
# calc: $used * 100 / ($used + $cached + $free)
calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
units: %
diff --git a/conf.d/health.d/softnet.conf b/conf.d/health.d/softnet.conf
index 5faf9a9ee..64e1c6784 100644
--- a/conf.d/health.d/softnet.conf
+++ b/conf.d/health.d/softnet.conf
@@ -1,7 +1,12 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
# check for common /proc/net/softnet_stat errors
alarm: 10min_netdev_backlog_exceeded
on: system.softnet_stat
+ os: linux
+ hosts: *
lookup: sum -10m unaligned absolute of dropped
units: packets
every: 1m
@@ -12,6 +17,8 @@
alarm: 10min_netdev_budget_ran_outs
on: system.softnet_stat
+ os: linux
+ hosts: *
lookup: sum -10m unaligned absolute of squeezed
units: events
every: 1m
diff --git a/conf.d/health.d/swap.conf b/conf.d/health.d/swap.conf
index 7f57560e2..830a9af95 100644
--- a/conf.d/health.d/swap.conf
+++ b/conf.d/health.d/swap.conf
@@ -1,6 +1,10 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
alarm: 30min_ram_swapped_out
on: system.swapio
+ os: linux
+ hosts: *
lookup: sum -30m unaligned absolute of out
# we have to convert KB to MB by dividing $this (i.e. the result of the lookup) with 1024
calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
@@ -14,6 +18,8 @@
alarm: ram_in_swap
on: system.swap
+ os: linux
+ hosts: *
calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
units: % of RAM
every: 10s
@@ -25,6 +31,8 @@
alarm: used_swap
on: system.swap
+ os: linux
+ hosts: *
calc: $used * 100 / ( $used + $free )
units: %
every: 10s
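The `30min_ram_swapped_out` calc above divides the KB lookup result by 1024 to get MB, then expresses it as a percentage of total RAM (the `system.ram` dimensions are reported in MB). With assumed sample values:

```python
swapped_out_kb = 204800   # $this: swap-out total over the last 30 minutes, KB
ram_used_mb, ram_cached_mb, ram_free_mb = 3000, 800, 200  # assumed, in MB

pct = (swapped_out_kb / 1024) * 100 / (ram_used_mb + ram_cached_mb + ram_free_mb)
print(f'{pct:.1f}% of RAM swapped out in 30 minutes')  # 5.0%
```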
diff --git a/conf.d/health.d/tcp_resets.conf b/conf.d/health.d/tcp_resets.conf
index 803c88a81..fec124ac7 100644
--- a/conf.d/health.d/tcp_resets.conf
+++ b/conf.d/health.d/tcp_resets.conf
@@ -1,7 +1,12 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
# -----------------------------------------------------------------------------
alarm: ipv4_tcphandshake_last_collected_secs
on: ipv4.tcphandshake
+ os: linux
+ hosts: *
calc: $now - $last_collected_t
units: seconds ago
every: 10s
@@ -16,6 +21,8 @@
alarm: 1m_ipv4_tcp_resets_sent
on: ipv4.tcphandshake
+ os: linux
+ hosts: *
lookup: average -1m at -10s unaligned absolute of OutRsts
units: tcp resets/s
every: 10s
@@ -23,6 +30,8 @@
alarm: 10s_ipv4_tcp_resets_sent
on: ipv4.tcphandshake
+ os: linux
+ hosts: *
lookup: average -10s unaligned absolute of OutRsts
units: tcp resets/s
every: 10s
@@ -37,6 +46,8 @@ options: no-clear-notification
alarm: 1m_ipv4_tcp_resets_received
on: ipv4.tcphandshake
+ os: linux
+ hosts: *
lookup: average -1m at -10s unaligned absolute of AttemptFails
units: tcp resets/s
every: 10s
@@ -44,6 +55,8 @@ options: no-clear-notification
alarm: 10s_ipv4_tcp_resets_received
on: ipv4.tcphandshake
+ os: linux
+ hosts: *
lookup: average -10s unaligned absolute of AttemptFails
units: tcp resets/s
every: 10s
diff --git a/conf.d/health.d/udp_errors.conf b/conf.d/health.d/udp_errors.conf
index 98e955c02..33338b83e 100644
--- a/conf.d/health.d/udp_errors.conf
+++ b/conf.d/health.d/udp_errors.conf
@@ -1,7 +1,12 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
# -----------------------------------------------------------------------------
alarm: ipv4_udperrors_last_collected_secs
on: ipv4.udperrors
+ os: linux
+ hosts: *
calc: $now - $last_collected_t
units: seconds ago
every: 10s
@@ -16,6 +21,8 @@
alarm: 1m_ipv4_udp_receive_buffer_errors
on: ipv4.udperrors
+ os: linux
+ hosts: *
lookup: sum -1m unaligned absolute of RcvbufErrors
units: errors
every: 10s
@@ -30,6 +37,8 @@
alarm: 1m_ipv4_udp_send_buffer_errors
on: ipv4.udperrors
+ os: linux
+ hosts: *
lookup: sum -1m unaligned absolute of SndbufErrors
units: errors
every: 10s
diff --git a/conf.d/health_alarm_notify.conf b/conf.d/health_alarm_notify.conf
index 4d8444ed5..641272ced 100644
--- a/conf.d/health_alarm_notify.conf
+++ b/conf.d/health_alarm_notify.conf
@@ -94,6 +94,15 @@ curl=""
# multiple recipients can be given like this:
# "admin1@example.com admin2@example.com ..."
+# the email address sending email notifications
+# the default is the system user netdata runs as (usually: netdata)
+# The following formats are supported:
+# EMAIL_SENDER="user@domain"
+# EMAIL_SENDER="User Name <user@domain>"
+# EMAIL_SENDER="'User Name' <user@domain>"
+# EMAIL_SENDER="\"User Name\" <user@domain>"
+EMAIL_SENDER=""
+
# enable/disable sending emails
SEND_EMAIL="YES"
@@ -101,6 +110,13 @@ SEND_EMAIL="YES"
DEFAULT_RECIPIENT_EMAIL="root"
# to receive only critical alarms, set it to "root|critical"
+# Optionally specify the encoding to list in the Content-Type header.
+# This doesn't change what encoding the e-mail is sent with, just what
+# the headers say it was encoded as.
+# This shouldn't need to be changed as it will almost always be
+# autodetected from the environment.
+#EMAIL_CHARSET="UTF-8"
+
#------------------------------------------------------------------------------
# pushover (pushover.net) global notification options
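The `EMAIL_SENDER` formats listed above are all valid RFC 5322 `From:` values; a small Python stand-in to illustrate (assumption: the real notifier composes the mail in shell and hands it to sendmail, this only shows the header shape):

```python
from email.message import EmailMessage

# Any of the documented EMAIL_SENDER formats works as a From: header value.
msg = EmailMessage()
msg['From'] = '"User Name" <user@domain>'
msg['To'] = 'root'
msg['Subject'] = 'netdata alarm'
msg.set_content('10min_cpu_usage is WARNING')
print(msg['From'])  # "User Name" <user@domain>
```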
diff --git a/conf.d/node.d/fronius.conf.md b/conf.d/node.d/fronius.conf.md
index c80afa0b5..622086b27 100644
--- a/conf.d/node.d/fronius.conf.md
+++ b/conf.d/node.d/fronius.conf.md
@@ -16,7 +16,7 @@ The module supports any number of servers. Sometimes there is a lag when collect
"update_every": 5,
"servers": [
{
- "name": "Solar",
+ "name": "solar",
"hostname": "symo.ip.or.dns",
"update_every": 5,
"api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
diff --git a/conf.d/node.d/stiebeleltron.conf.md b/conf.d/node.d/stiebeleltron.conf.md
new file mode 100644
index 000000000..6ae5aa1c7
--- /dev/null
+++ b/conf.d/node.d/stiebeleltron.conf.md
@@ -0,0 +1,453 @@
+[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
+
+Original author: BrainDoctor (github)
+
+The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
+
+### Testing
+This plugin has been tested within the following environment:
+ * ISG version: 8.5.6
+ * MFG version: 12
+ * Controller version: 9
+ * July (summer time, not much activity)
+ * Interface language: English
+ * login- and password-less ISG web access (without HTTPS it's useless anyway)
+ * Heatpump model: WPL 25 I-2
+ * Hot water boiler model: 820 WT 1
+
+So, if the language is set to English, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
+
+In my case, the ISG is relatively slow to respond (at least 1 s, sometimes up to 4 s). Collecting metrics every 10 s is more than enough for me.
+
+### How to update the config
+
+* The dimensions support variable digits; the default is `1`. Most of the values printed by the ISG use 1 digit, some use 2.
+* The dimensions also support the `multiplier` and `divisor` attributes; however, the divisor is overridden by `digits`, if specified. The default is `1`.
+* The test string for the regex is always the whole HTML output from the url. For each parameter you need a regular expression that extracts the value from the HTML source in its first capture group.
+  Recommended: [regexr.com](https://regexr.com) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+
+The charts are generated using the configuration below, so if your installation is in another language or has other metrics, just adapt the structure or the regexes.
+### Configuration template
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "pages": [
+ {
+ "name": "System",
+ "id": "system",
+ "url": "http://machine.ip.or.dns/?s=1,0",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "eletricreheating",
+ "name": "electric reheating",
+ "charts": [
+ {
+ "title": "Dual Mode Reheating Temperature",
+ "id": "reheatingtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "dualmodeheatingtemp",
+ "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "dualmodehotwatertemp",
+ "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "roomtemp",
+ "name": "room temperature",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "heating",
+ "name": "heating",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Temperature",
+ "id": "flowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Reheating",
+ "id" : "reheating",
+ "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Buffer Temperature",
+ "id": "buffertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Fixed Temperature",
+ "id": "fixedtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Set",
+ "id" : "setfixed",
+ "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Pre-flow Temperature",
+ "id": "preflowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 6,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actualreturn",
+ "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "hotwater",
+ "name": "hot water",
+ "charts": [
+ {
+ "title": "Hot Water Temperature",
+ "id": "hotwatertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "general",
+ "name": "general",
+ "charts": [
+ {
+ "title": "Outside Temperature",
+ "id": "outside",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Outside temperature",
+ "id": "outsidetemp",
+ "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Condenser Temperature",
+ "id": "condenser",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Condenser",
+ "id": "condenser",
+ "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Heating Circuit Pressure",
+ "id": "heatingcircuit",
+ "unit": "bar",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating Circuit",
+ "id": "heatingcircuit",
+ "digits": 2,
+ "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Rate",
+ "id": "flowrate",
+ "unit": "liters/min",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Flow Rate",
+ "id": "flowrate",
+ "digits": 2,
+ "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Output",
+ "id": "output",
+ "unit": "%",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Heat Pump",
+ "id": "outputheatpump",
+ "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ },
+ {
+ "name": "Water Pump",
+ "id": "intpumprate",
+ "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "name": "Heat Pump",
+ "id": "heatpump",
+ "url": "http://machine.ip.or.dns/?s=1,1",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "runtime",
+ "name": "runtime",
+ "charts": [
+ {
+ "title": "Compressor",
+ "id": "compressor",
+ "unit": "h",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "hotwater",
+ "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Reheating",
+ "id": "reheating",
+ "unit": "h",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Reheating 1",
+ "id": "rh1",
+ "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Reheating 2",
+ "id" : "rh2",
+ "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "processdata",
+ "name": "process data",
+ "charts": [
+ {
+ "title": "Remaining Compressor Rest Time",
+ "id": "remaincomp",
+ "unit": "s",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Timer",
+ "id": "timer",
+ "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "energy",
+ "name": "energy",
+ "charts": [
+ {
+ "title": "Compressor Today",
+ "id": "compressorday",
+ "unit": "kWh",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Compressor Total",
+ "id": "compressortotal",
+ "unit": "MWh",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+```
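Per dimension, the extraction described above amounts to a regex search with one capture group, plus conversion of the comma decimal separator the ISG prints; a minimal Python sketch against a hypothetical HTML fragment:

```python
import re

# Hypothetical fragment of the ISG status page.
html = '<tr class="even"><td>ACTUAL TEMPERATURE HC 1</td><td>23,4 °C</td></tr>'

# The first capture group carries the value; the ISG uses a comma as the
# decimal separator, so swap it for a dot before converting.
pattern = r'ACTUAL TEMPERATURE HC 1</td>\s*<td.*>(-?[0-9]+,[0-9]+)'
match = re.search(pattern, html)
if match:
    print(float(match.group(1).replace(',', '.')))  # 23.4
```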
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
index 0a37e40ae..741d49914 100644
--- a/conf.d/python.d.conf
+++ b/conf.d/python.d.conf
@@ -30,6 +30,7 @@ log_interval: 3600
apache_cache: no
# apache: yes
# bind_rndc: yes
+# chrony: yes
# cpufreq: yes
# cpuidle: yes
# dns_query_time: yes
diff --git a/conf.d/python.d/chrony.conf b/conf.d/python.d/chrony.conf
new file mode 100644
index 000000000..46229687b
--- /dev/null
+++ b/conf.d/python.d/chrony.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for chrony
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, chrony also supports the following:
+#
+# command: 'chronyc tracking' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED chrony CONFIGURATION
+#
+# netdata will query chrony as the user netdata.
+# Verify that the user netdata is allowed to call 'chronyc tracking'.
+# Check 'cmdallow' in chrony.conf.
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'chronyc -n tracking'
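Under the hood, the module runs the configured command and splits its `Key : value` output lines; a rough Python sketch of that step (assuming `chronyc` is installed and the netdata user passes the cmdallow check noted above):

```python
import subprocess

# Run the tracking query and collect its key/value lines.
output = subprocess.check_output(['chronyc', '-n', 'tracking'], text=True)

metrics = {}
for line in output.splitlines():
    key, sep, value = line.partition(':')
    if sep:
        metrics[key.strip()] = value.strip()

print(metrics.get('Stratum'))  # e.g. '2'
```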
diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf
index 12dddae67..1dbb64f40 100644
--- a/conf.d/python.d/postgres.conf
+++ b/conf.d/python.d/postgres.conf
@@ -75,6 +75,9 @@
# a postgres user for netdata and add its password below to allow
# netdata connect.
#
+# Without superuser access, netdata won't be able to generate the write-
+# ahead log and the background writer charts.
+#
# ----------------------------------------------------------------------
socket:
diff --git a/conf.d/python.d/tomcat.conf b/conf.d/python.d/tomcat.conf
index aef9631b9..ce89175f6 100644
--- a/conf.d/python.d/tomcat.conf
+++ b/conf.d/python.d/tomcat.conf
@@ -63,7 +63,10 @@
# user: 'username'
# pass: 'password'
#
-
+# if you have multiple connectors, the following is supported:
+#
+# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML
+#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf
index e51b565d6..cd1f1af00 100644
--- a/conf.d/python.d/web_log.conf
+++ b/conf.d/python.d/web_log.conf
@@ -76,7 +76,7 @@
# observium: 'observium.*' # name(dimension): REGEX to match
# stub_status: 'stub_status' # name(dimension): REGEX to match
# custom_log_format: # define a custom log format
-# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d\.\d+) '
+# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
# time_multiplier: 1000000 # type <int> - convert time to microseconds
# ----------------------------------------------------------------------
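
The pattern change above widens the resp_time group from \d\.\d+ to \d+\.\d+. The old form required exactly one digit before the decimal point, so a response time such as 12.345678 could not match in its slot and the line failed to parse. A quick check with grep (the sample line tail is hypothetical):

    $ echo '512 12.345678 ' | grep -cE '[0-9]+ [0-9]\.[0-9]+ $'
    0    # old single-digit form: no match
    $ echo '512 12.345678 ' | grep -cE '[0-9]+ [0-9]+\.[0-9]+ $'
    1    # new form matches multi-digit seconds
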
diff --git a/config.h.in b/config.h.in
index 156f9a728..635316644 100644
--- a/config.h.in
+++ b/config.h.in
@@ -108,6 +108,9 @@
/* Define to 1 if you have the `sched_setscheduler' function. */
#undef HAVE_SCHED_SETSCHEDULER
+/* Define to 1 if you have the setns() function */
+#undef HAVE_SETNS
+
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
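
The new HAVE_SETNS symbol gates the cgroup-network helper introduced in this release. A sketch of how such a guard is typically consumed in C (the helper name and error handling are illustrative, not netdata's actual code):

    #define _GNU_SOURCE
    #include <sched.h>      /* setns(), CLONE_NEWNET */
    #include <fcntl.h>      /* open() */
    #include <unistd.h>     /* close() */

    /* hypothetical helper: join another process's network namespace,
     * e.g. path = "/proc/1234/ns/net"; needs CAP_SYS_ADMIN at runtime */
    static int join_netns(const char *path) {
    #ifdef HAVE_SETNS
        int fd = open(path, O_RDONLY);
        if (fd == -1) return -1;
        int rc = setns(fd, CLONE_NEWNET);
        close(fd);
        return rc;
    #else
        (void)path;
        return -1;          /* setns() not available at build time */
    #endif
    }
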
diff --git a/configs.signatures b/configs.signatures
index d1308a882..f22431c26 100644
--- a/configs.signatures
+++ b/configs.signatures
@@ -15,6 +15,7 @@ declare -A configs_signatures=(
['05a8f39f134850c1e8d6267dbe706273']='health.d/web_log.conf'
['061c45b0e34170d357e47883166ecf40']='python.d/nginx.conf'
['074df527cc70b5f38c0714f08f20e57c']='health.d/apache.conf'
+ ['0787e67357804b934d2866f1b7c60b14']='health.d/ipc.conf'
['08042325ab27256b938575deafee8ecf']='python.d/nginx.conf'
['0847d54a7a0c7e0381c52e9d4d3fa7db']='health.d/mdstat.conf'
['084ee72d64760f2641b0720e79c922f3']='health.d/cpu.conf'
@@ -36,6 +37,7 @@ declare -A configs_signatures=(
['107e6ac69b30fb9837ac64c35f891ec7']='health.d/tcp_resets.conf'
['10c3b525850a1cb9de760a8ee96fbc6e']='charts.d/opensips.conf'
['1112c848ef91ebb9c622020d09712d67']='health.d/net.conf'
+ ['12a4c7803ae79506a14ea784fea60dce']='health.d/net.conf'
['13141998a5d71308d9c119834c27bfd3']='python.d.conf'
['142a5b693d34b0308bb0b8aec71fad79']='python.d/postfix.conf'
['14783e051650442ec9e2ed38d81d667e']='charts.d/exim.conf'
@@ -49,6 +51,7 @@ declare -A configs_signatures=(
['1972e48345e6c3f0d65f94a03317622b']='health_alarm_notify.conf'
['1c12b678ab65f271a96da1bbd0a1ab1c']='health.d/softnet.conf'
['1c3168c95b53e999df3d45162b3f50b8']='health.d/fping.conf'
+ ['1c71a8792c5c0ed035dd97af93a04838']='health_alarm_notify.conf'
['1d6efba856acaaaf3b50bc6d66611b92']='python.d/web_log.conf'
['1e09f326178acf07d361c08a44d8b1f3']='python.d/rabbitmq.conf'
['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
@@ -63,6 +66,7 @@ declare -A configs_signatures=(
['22952dbf42647c583b005054b23b545f']='health.d/disks.conf'
['22ceb822983134a7ca67343241f30341']='health.d/disks.conf'
['2385e5d35b440619621c4af62492d91b']='health.d/disks.conf'
+ ['23a5afe5260a7ad388e447709cb009df']='python.d/web_log.conf'
['23ae815aefa221b1929f96752a1f7556']='health.d/squid.conf'
['243503ceee1d5b4e1e55a28768a116ae']='health.d/net.conf'
['2472e49550326f7142e2c425ccbca005']='health.d/softnet.conf'
@@ -71,12 +75,14 @@ declare -A configs_signatures=(
['262f98b3d88b98978cb08d566ce85a9d']='charts.d/squid.conf'
['2827de41cf34a91b7a8e4d8724f59668']='health.d/net.conf'
['28df44a90e8ea4c6156314c03e88bf44']='health.d/softnet.conf'
+ ['292c6cbbb5c819bb91f87c02a45890c1']='health.d/swap.conf'
['297160ae7ee01a547ed14f857b4f2c8d']='health.d/memcached.conf'
['298504f331c55dff4055321ff3a7b5cc']='health.d/web_log.conf'
['29f97e10b92333790fbe0d2a3617b736']='health_alarm_notify.conf'
['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
['2ad55a5d1e885cf142849a78d4b00401']='health.d/net.conf'
['2bbbebf52f84fd27fbefecd2a8a8076f']='health.d/memcached.conf'
+ ['2c2b7e8df922b2bf121fb7db32bbc3bd']='health.d/udp_errors.conf'
['2d1d7498c72f4245cf32902c2b7e71e0']='health.d/entropy.conf'
['2f3a8e33df83f14e0af8ca2465697215']='python.d/exim.conf'
['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
@@ -122,6 +128,7 @@ declare -A configs_signatures=(
['42bf1c7c64ed77038a0aa094d792a9e2']='python.d/mysql.conf'
['4332dee96e4f38fc73c962df3494ab7c']='health_alarm_notify.conf'
['43ebb7f224c3b232d8ad044d7e9508b6']='health.d/net.conf'
+ ['4401f0c6a101d35d2cb833e7b0aeb421']='health.d/qos.conf'
['444e20cf75e2cd019e8d412d5d1f4a7f']='charts.d/cpu_apps.conf'
['4461bfacf9a3da47770fb3ca31f4c91f']='health.d/net.conf'
['450667c552ab7a7d8d4a2c214fdacca5']='health.d/entropy.conf'
@@ -138,6 +145,7 @@ declare -A configs_signatures=(
['4cd585f5dfdacaf287413ad037b4e60a']='apps_groups.conf'
['4d13684cadfa90e73ab465409bf7263b']='health.d/mysql.conf'
['4d91ee6fe4c887ea3865ef36ac63da3c']='health.d/mysql.conf'
+ ['4da1c0f009d87995ed66d84fae07f09a']='health.d/memory.conf'
['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
['4f6a5b47a13f5912cc89e9286701dd08']='health.d/redis.conf'
['4f6f4d39c19d7d954f769d3f9d3b4da5']='health.d/memcached.conf'
@@ -163,6 +171,7 @@ declare -A configs_signatures=(
['56b689031cdcf138064825f31474b37d']='apps_groups.conf'
['573398335c0c71c075fa57f702bce287']='health.d/disks.conf'
['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
+ ['5855dd70d71c8497e5591b0690162c9c']='health.d/tcp_resets.conf'
['58e835b7176865ec5a6f59f7aba832bf']='health.d/named.conf'
['598f9814966a9e2fe48e8218151d3fa6']='stream.conf'
['59dded33e3adfe622f36c557a4f4bed7']='health.d/net.conf'
@@ -181,6 +190,7 @@ declare -A configs_signatures=(
['632c28d714c87a4969d11cf36a5edaa8']='health.d/web_log.conf'
['636d032928ea0f4741eab264fb49c099']='apps_groups.conf'
['6398ef37a15cb6a0bc921f58948d2b39']='health.d/softnet.conf'
+ ['63c626bc64b3d7bc46a72fbccf9b1926']='health.d/net.conf'
['64070d856ab1b47a18ec871e49bbc13b']='python.d/squid.conf'
['647361e99b5f4e0d73470c569bb9461c']='apps_groups.conf'
['64ac37868097a462e5ee6905c350267e']='python.d/postgres.conf'
@@ -245,6 +255,8 @@ declare -A configs_signatures=(
['80df37b89e852d585209b8c02bb94312']='python.d/bind_rndc.conf'
['80f109ff293ac94222bf3959432751bd']='health.d/qos.conf'
['81255035f6d53534938085df72cdef23']='health.d/nginx.conf'
+ ['8170ba3ae507cf9322bd60350348552e']='health.d/net.conf'
+ ['81fd16f29d5f3d422fe1cee82dc8ed9d']='health.d/cpu.conf'
['8213d921b6a8382e27052fb42d81db3d']='python.d/freeradius.conf'
['8214bb8f4b005aa4691fcd38f7331e8f']='health.d/swap.conf'
['837480f77ba1a85677a36747fbc2cd2e']='python.d/sensors.conf'
@@ -266,6 +278,7 @@ declare -A configs_signatures=(
['8a66a3085ad8892a002ff39b18b2cb07']='python.d/fail2ban.conf'
['8c1d41e2c88aeca78bc319ed74c8748c']='python.d/phpfpm.conf'
['8d0552371a7c9725a04196fa560813d1']='health.d/cpu.conf'
+ ['8d24873bb25c195026918f15626310ea']='health.d/softnet.conf'
['8dc0bd0a70b5117454bd5f5b98f91c2c']='health.d/disks.conf'
['8f4f925c1e97dd164007495ec5135ffc']='health.d/fping.conf'
['8f7b734ea0f89abf8acbb47c50234477']='health.d/web_log.conf'
@@ -274,6 +287,7 @@ declare -A configs_signatures=(
['91c757ef6be3abdb86906d9dbb9c217a']='fping.conf'
['91cf3b3d42cac969b8b3fd4f531ecfb3']='python.d/squid.conf'
['91e1a9703debbdc64edf124419fdc14b']='python.d/elasticsearch.conf'
+ ['9347bcce0b3574ac5193d43248d2e3cc']='python.d/chrony.conf'
['9542f80def48ba105190f6cdaa18248e']='health.d/mysql.conf'
['97eee7a30e6419df4537242e9d4a719d']='health.d/mysql.conf'
['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
@@ -292,11 +306,13 @@ declare -A configs_signatures=(
['a03f3e38378385bf87d4c0f81eb1f108']='health.d/tcp_resets.conf'
['a09714b5942cf25a89ec3da1dbc18063']='health.d/ram.conf'
['a0b3a12389c9c56dfe35964b20b59836']='health.d/bind_rndc.conf'
+ ['a0c0ef7ca9671f4b5e797d4276e5c0dd']='health.d/disks.conf'
['a0ee8f351f213c0e8af9eb7a4a09cb95']='apps_groups.conf'
['a1b6dfe312b896b0b1ba471e8ac07f95']='python.d/isc_dhcpd.conf'
['a2944a309f8ce1a3195451856478d6ae']='python.d.conf'
['a2a647dc492dc2d6ed1f5c0fdc97a96e']='python.d/mongodb.conf'
['a305b400378d6492efd15f9940c2779b']='health.d/softnet.conf'
+ ['a41885acf112563e3446f9d937362c9b']='python.d/chrony.conf'
['a4407787e4beb23a701a8a614dca461d']='health.d/disks.conf'
['a44899a5795bed2863c1d11aa3e85586']='health.d/swap.conf'
['a4a8660728c6afcb528cc6b378897d6b']='health.d/squid.conf'
@@ -324,9 +340,12 @@ declare -A configs_signatures=(
['aa8b57a733c2035917acf81a8ebdfbe7']='health.d/haproxy.conf'
['aac44691a1cf95fa8f8990a79bab4ce1']='python.d/web_log.conf'
['abaf2e021f9f6ee5d1c4e4726f47348e']='health.d/ipc.conf'
+ ['abe1a80ac6d6f97bd324e72f31e8256e']='health.d/ram.conf'
['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
+ ['ad15b251b93f8b16bb33ec508f44a598']='health.d/netfilter.conf'
['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
['ae5ac0a3521e50aa6f6eda2a330b4075']='python.d/example.conf'
+ ['af12051cf57dd4e484ef8e64502b7549']='health.d/net.conf'
['af14667ee7993acea810f6d50923bdc9']='health.d/web_log.conf'
['af44cc53aa2bc5cc8935667119567522']='python.d.conf'
['afdae4646c755ff2d117527fbf761c8e']='health.d/disks.conf'
@@ -337,7 +356,9 @@ declare -A configs_signatures=(
['b185914d4f795e1732273dc4c7a35845']='health.d/memory.conf'
['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
['b32164929eda7449a9677044e11151bf']='python.d.conf'
+ ['b3d48935ab7f44a57d40ad349df0033d']='python.d/postgres.conf'
['b3fc4749b132e55ac0d3a0f92859237e']='health.d/tcp_resets.conf'
+ ['b4825f731cc7eb03b374eade14a453c1']='health.d/net.conf'
['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
['b636e5e603f9d93e52c7577ac8c6bf0c']='health.d/entropy.conf'
['b68706bb8101ef85192db92f865a5d80']='health_alarm_notify.conf'
@@ -357,11 +378,13 @@ declare -A configs_signatures=(
['bf66f113b2dd8d8fb444cbd5650f284c']='health_alarm_notify.conf'
['c004430f55310ae9ed489c4905ed02cb']='charts.d/apache.conf'
['c080e006f544c949baca33cc24a9c126']='health_alarm_notify.conf'
+ ['c132d2e257fc4df2925be7ad75100d5b']='health.d/entropy.conf'
['c1a7e634b5b8aad523a0d115a93379cd']='health.d/memcached.conf'
['c3296c08260bcd556e74711c820817be']='health.d/cpu.conf'
['c3661b68232e06de90bb5e63e725b8b6']='health_alarm_notify.conf'
['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
['c6403d8b1bcfa52d3abb941be155fc03']='python.d.conf'
+ ['c6b9f31e14adca433f82054f62388c47']='python.d/web_log.conf'
['c84fd3292710091802e443c8e688dee1']='health_alarm_notify.conf'
['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
@@ -387,10 +410,12 @@ declare -A configs_signatures=(
['d1596fe068c8674efade49a4a8e22b5d']='health.d/isc_dhcpd.conf'
['d162b7465a56151312e60151c1d74fba']='health.d/squid.conf'
['d1e79707cd9b51a14288e8dd40694fcc']='fping.conf'
+ ['d29c5fa5faf74b86d01c2270a79388d8']='health.d/disks.conf'
['d2b2ad30e277a69d8713e620dabc18bc']='python.d/phpfpm.conf'
['d55bdb83b9ff606852f6a97c1430258c']='health.d/ram.conf'
['d55be5bb5e108da1e7645da007c53cd4']='python.d.conf'
['d5dab509d8792f795bece27de39dd476']='health.d/mysql.conf'
+ ['d69eba15d3e968187a938a7b98e22dda']='python.d.conf'
['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
['d7e0bd12d4a60a761dcab3531a841711']='python.d/phpfpm.conf'
['d8dc489e32f7114c6298fce94e86a8ef']='health.d/entropy.conf'
@@ -406,6 +431,7 @@ declare -A configs_signatures=(
['ddda2bb1c88be03b637d3285406f7910']='health.d/named.conf'
['dddc4f93e6187fe4220eb6bf5e20f095']='health.d/ram.conf'
['de02f899a61f21b86adb646940f0bcae']='health.d/net.conf'
+ ['de5fe159e14b481d6bd69856eaddd242']='health_alarm_notify.conf'
['def883f35986c9d25de63b1a8e7d0f46']='health.d/entropy.conf'
['df381f3a7ca9fb2b4b43ae7cb7a4c492']='python.d/mysql.conf'
['df7e8044902b5e155fad8430c2ddcfa8']='health.d/fping.conf'
@@ -423,10 +449,12 @@ declare -A configs_signatures=(
['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
['e40947d22f7ed5359f12fc89e3512963']='python.d/dovecot.conf'
['e449e5582279742496550df14b6fca95']='health.d/entropy.conf'
+ ['e4ed13f996434ac17b40a2228c96283b']='python.d/tomcat.conf'
['e5f32f54d6d6728f21f9ac26f37d6573']='python.d/example.conf'
['e734c5951a8764d4d9de046dd7cf7407']='health.d/softnet.conf'
['e7bc22a1942cffbd2b1b0cfd119ee328']='health.d/ipfs.conf'
['e8ec8046c7007af6ca3e8c51e62c99f8']='health.d/disks.conf'
+ ['ea031c1c0c36edee3bd08fae559c4203']='health_alarm_notify.conf'
['ea1a96c42ad464c354fb250e3408c3e8']='stream.conf'
['eaa7beb935cae9c48a40fb934eb105a7']='health.d/web_log.conf'
['eb5168f0b516bc982aac45e59da6e52e']='health.d/nginx.conf'
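
Each key in configs_signatures is a 32-hex-digit checksum of a stock config file, which lets the installer distinguish pristine files from user-edited ones before overwriting. Assuming the keys are plain MD5 digests of the file contents (hedged; check the installer for the exact recipe), a signature can be reproduced with:

    md5sum conf.d/health.d/cpu.conf | cut -d ' ' -f 1
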
diff --git a/configure b/configure
index 6a9d15b9c..e1fae5540 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for netdata 1.7.0.
+# Generated by GNU Autoconf 2.69 for netdata 1.8.0.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='netdata'
PACKAGE_TARNAME='netdata'
-PACKAGE_VERSION='1.7.0'
-PACKAGE_STRING='netdata 1.7.0'
+PACKAGE_VERSION='1.8.0'
+PACKAGE_STRING='netdata 1.8.0'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -619,8 +619,8 @@ ac_includes_default="\
# include <unistd.h>
#endif"
-ac_func_list=
ac_header_list=
+ac_func_list=
ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
@@ -647,6 +647,8 @@ chartsdir
cachedir
registrydir
varlibdir
+ENABLE_PLUGIN_CGROUP_NETWORK_FALSE
+ENABLE_PLUGIN_CGROUP_NETWORK_TRUE
ENABLE_PLUGIN_NFACCT_FALSE
ENABLE_PLUGIN_NFACCT_TRUE
LIBMNL_LIBS
@@ -792,6 +794,7 @@ enable_dependency_tracking
enable_plugin_nfacct
enable_plugin_freeipmi
enable_pedantic
+enable_accept4
with_webdir
with_libcap
with_zlib
@@ -1371,7 +1374,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures netdata 1.7.0 to adapt to many kinds of systems.
+\`configure' configures netdata 1.8.0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1441,7 +1444,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of netdata 1.7.0:";;
+ short | recursive ) echo "Configuration of netdata 1.8.0:";;
esac
cat <<\_ACEOF
@@ -1458,21 +1461,23 @@ Optional Features:
do not reject slow dependency extractors
--disable-dependency-tracking
speeds up one-time build
- --enable-plugin-nfacct enable nfacct plugin, requires root
+ --enable-plugin-nfacct enable nfacct plugin, requires running netdata as
+ root [default disabled]
--enable-plugin-freeipmi
- enable freeipmi plugin
- --enable-pedantic enable pedantic compiler warnings
+ enable freeipmi plugin [default autodetect]
+ --enable-pedantic enable pedantic compiler warnings [default disabled]
+ --disable-accept4 System does not have accept4 [default autodetect]
--disable-x86-sse SSE/SS2 optimizations on x86 [default enabled]
- --disable-lto Link Time Optimizations [default enabled]
+ --disable-lto Link Time Optimizations [default autodetect]
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
--without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
--with-webdir location of webdir [PKGDATADIR/web]
- --with-libcap build with libcap
- --with-zlib build with zlib
- --with-math build with math
- --with-user use this user to drop privilege
+ --with-libcap build with libcap [default autodetect]
+ --without-zlib build without zlib [default enabled]
+ --without-math build without math [default enabled]
+ --with-user use this user to drop privilege [default nobody]
--with-jemalloc-prefix=PREFIX
Specify the jemalloc prefix [default=""]
--with-jemalloc=DIR use a specific jemalloc library
@@ -1582,7 +1587,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-netdata configure 1.7.0
+netdata configure 1.8.0
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1877,6 +1882,60 @@ fi
} # ac_fn_c_try_link
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_type
+
# ac_fn_c_check_func LINENO FUNC VAR
# ----------------------------------
# Tests whether FUNC exists, setting the cache variable VAR accordingly
@@ -1944,60 +2003,6 @@ $as_echo "$ac_res" >&6; }
} # ac_fn_c_check_func
-# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
-# -------------------------------------------
-# Tests whether TYPE exists after having included INCLUDES, setting cache
-# variable VAR accordingly.
-ac_fn_c_check_type ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if eval \${$3+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- eval "$3=no"
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-$4
-int
-main ()
-{
-if (sizeof ($2))
- return 0;
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-$4
-int
-main ()
-{
-if (sizeof (($2)))
- return 0;
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
- eval "$3=yes"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-
-} # ac_fn_c_check_type
-
# ac_fn_c_find_intX_t LINENO BITS VAR
# -----------------------------------
# Finds a signed integer type with width BITS, setting cache variable VAR
@@ -2360,7 +2365,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by netdata $as_me 1.7.0, which was
+It was created by netdata $as_me 1.8.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2640,8 +2645,8 @@ $as_echo "$as_me: creating cache $cache_file" >&6;}
>$cache_file
fi
-as_fn_append ac_func_list " accept4"
as_fn_append ac_header_list " sys/prctl.h"
+as_fn_append ac_func_list " accept4"
as_fn_append ac_header_list " linux/netfilter/nfnetlink_conntrack.h"
# Check that the precious variables saved in the cache have kept the same
# value.
@@ -2741,7 +2746,7 @@ $as_echo "$as_me: ***************** MAINTAINER MODE *****************" >&6;}
PACKAGE_BUILT_DATE=$(date '+%d %b %Y')
fi
-PACKAGE_RPM_VERSION="1.7.0"
+PACKAGE_RPM_VERSION="1.8.0"
@@ -3268,7 +3273,7 @@ fi
# Define the identity of the package.
PACKAGE='netdata'
- VERSION='1.7.0'
+ VERSION='1.8.0'
cat >>confdefs.h <<_ACEOF
@@ -3312,7 +3317,92 @@ AMTAR='$${TAR-tar}'
# We'll loop over all known methods to create a tar archive until one works.
_am_tools='gnutar pax cpio none'
-am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a pax tar archive" >&5
+$as_echo_n "checking how to create a pax tar archive... " >&6; }
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_pax-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ { echo "$as_me:$LINENO: $_am_tar --version" >&5
+ ($_am_tar --version) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && break
+ done
+ am__tar="$_am_tar --format=posix -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=posix -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x pax -w "$$tardir"'
+ am__tar_='pax -L -x pax -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H pax -L'
+ am__tar_='find "$tardir" -print | cpio -o -H pax -L'
+ am__untar='cpio -i -H pax -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_pax}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5
+ (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5
+ ($am__untar <conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ { echo "$as_me:$LINENO: cat conftest.dir/file" >&5
+ (cat conftest.dir/file) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
+ rm -rf conftest.dir
+
+ if ${am_cv_prog_tar_pax+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ am_cv_prog_tar_pax=$_am_tool
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_pax" >&5
+$as_echo "$am_cv_prog_tar_pax" >&6; }
@@ -5079,6 +5169,14 @@ else
fi
+# Check whether --enable-accept4 was given.
+if test "${enable_accept4+set}" = set; then :
+ enableval=$enable_accept4;
+else
+ enable_accept4="detect"
+
+fi
+
# Check whether --with-webdir was given.
if test "${with_webdir+set}" = set; then :
@@ -5399,24 +5497,6 @@ fi
-
-
-
- for ac_func in $ac_func_list
-do :
- as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
-ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
- cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-done
-
-
-
-
ac_fn_c_check_type "$LINENO" "struct timespec" "ac_cv_type_struct_timespec" "#include <time.h>
"
if test "x$ac_cv_type_struct_timespec" = xyes; then :
@@ -5972,6 +6052,26 @@ done
+if test "${enable_accept4}" != "no"; then
+
+
+
+ for ac_func in $ac_func_list
+do :
+ as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+
+
+fi
# -----------------------------------------------------------------------------
# operating system detection
@@ -7688,6 +7788,36 @@ fi
# -----------------------------------------------------------------------------
+# check for setns() - cgroup-network
+
+ac_fn_c_check_func "$LINENO" "setns" "ac_cv_func_setns"
+if test "x$ac_cv_func_setns" = xyes; then :
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if cgroup-network can be enabled" >&5
+$as_echo_n "checking if cgroup-network can be enabled... " >&6; }
+if test "$ac_cv_func_setns" = "yes" ; then
+ have_setns="yes"
+
+$as_echo "#define HAVE_SETNS 1" >>confdefs.h
+
+else
+ have_setns="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${have_setns}" >&5
+$as_echo "${have_setns}" >&6; }
+ if test "${have_setns}" = "yes"; then
+ ENABLE_PLUGIN_CGROUP_NETWORK_TRUE=
+ ENABLE_PLUGIN_CGROUP_NETWORK_FALSE='#'
+else
+ ENABLE_PLUGIN_CGROUP_NETWORK_TRUE='#'
+ ENABLE_PLUGIN_CGROUP_NETWORK_FALSE=
+fi
+
+
+
+# -----------------------------------------------------------------------------
# Link-Time-Optimization
if test "${enable_lto}" != "no"; then
@@ -7827,7 +7957,7 @@ pluginsdir="\$(libexecdir)/netdata/plugins.d"
-ac_config_files="$ac_config_files Makefile charts.d/Makefile conf.d/Makefile netdata.spec python.d/Makefile node.d/Makefile plugins.d/Makefile src/Makefile system/Makefile web/Makefile contrib/Makefile"
+ac_config_files="$ac_config_files Makefile charts.d/Makefile conf.d/Makefile netdata.spec python.d/Makefile node.d/Makefile plugins.d/Makefile src/Makefile system/Makefile web/Makefile diagrams/Makefile makeself/Makefile contrib/Makefile tests/Makefile"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
@@ -7994,6 +8124,10 @@ if test -z "${ENABLE_PLUGIN_NFACCT_TRUE}" && test -z "${ENABLE_PLUGIN_NFACCT_FAL
as_fn_error $? "conditional \"ENABLE_PLUGIN_NFACCT\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_TRUE}" && test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_CGROUP_NETWORK\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
: "${CONFIG_STATUS=./config.status}"
ac_write_fail=0
@@ -8391,7 +8525,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by netdata $as_me 1.7.0, which was
+This file was extended by netdata $as_me 1.8.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -8457,7 +8591,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-netdata config.status 1.7.0
+netdata config.status 1.8.0
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
@@ -8598,7 +8732,10 @@ do
"src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
"system/Makefile") CONFIG_FILES="$CONFIG_FILES system/Makefile" ;;
"web/Makefile") CONFIG_FILES="$CONFIG_FILES web/Makefile" ;;
+ "diagrams/Makefile") CONFIG_FILES="$CONFIG_FILES diagrams/Makefile" ;;
+ "makeself/Makefile") CONFIG_FILES="$CONFIG_FILES makeself/Makefile" ;;
"contrib/Makefile") CONFIG_FILES="$CONFIG_FILES contrib/Makefile" ;;
+ "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
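
configure is a generated artifact: every 1.7.0 to 1.8.0 change above mirrors an edit in configure.ac below. After touching configure.ac, something like the following regenerates it (these flags are a common choice, not prescribed by this repo):

    autoreconf -ivf    # rerun aclocal/autoconf/autoheader/automake, overwriting outputs
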
diff --git a/configure.ac b/configure.ac
index 010e0e939..16381f8dd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@
AC_PREREQ(2.60)
define([VERSION_MAJOR], [1])
-define([VERSION_MINOR], [7])
+define([VERSION_MINOR], [8])
define([VERSION_FIX], [0])
define([VERSION_NUMBER], VERSION_MAJOR[.]VERSION_MINOR[.]VERSION_FIX)
define([VERSION_SUFFIX], [])
@@ -36,7 +36,7 @@ AC_CONFIG_AUX_DIR([.])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_SRCDIR([src/main.c])
-AM_INIT_AUTOMAKE
+AM_INIT_AUTOMAKE([tar-pax])
AC_CANONICAL_HOST
AC_PROG_CC
AC_PROG_INSTALL
@@ -49,22 +49,28 @@ AC_USE_SYSTEM_EXTENSIONS
AC_ARG_ENABLE(
[plugin-nfacct],
- [AS_HELP_STRING([--enable-plugin-nfacct], [enable nfacct plugin, requires root])],
+ [AS_HELP_STRING([--enable-plugin-nfacct], [enable nfacct plugin, requires running netdata as root @<:@default disabled@:>@])],
,
[enable_plugin_nfacct="no"]
)
AC_ARG_ENABLE(
[plugin-freeipmi],
- [AS_HELP_STRING([--enable-plugin-freeipmi], [enable freeipmi plugin])],
+ [AS_HELP_STRING([--enable-plugin-freeipmi], [enable freeipmi plugin @<:@default autodetect@:>@])],
,
[enable_plugin_freeipmi="detect"]
)
AC_ARG_ENABLE(
[pedantic],
- [AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings])],
+ [AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
,
[enable_pedantic="no"]
)
+AC_ARG_ENABLE(
+ [accept4],
+ [AS_HELP_STRING([--disable-accept4], [System does not have accept4 @<:@default autodetect@:>@])],
+ ,
+ [enable_accept4="detect"]
+)
AC_ARG_WITH(
[webdir],
[AS_HELP_STRING([--with-webdir], [location of webdir @<:@PKGDATADIR/web@:>@])],
@@ -73,25 +79,25 @@ AC_ARG_WITH(
)
AC_ARG_WITH(
[libcap],
- [AS_HELP_STRING([--with-libcap], [build with libcap])],
+ [AS_HELP_STRING([--with-libcap], [build with libcap @<:@default autodetect@:>@])],
,
[with_libcap="detect"]
)
AC_ARG_WITH(
[zlib],
- [AS_HELP_STRING([--with-zlib], [build with zlib])],
+ [AS_HELP_STRING([--without-zlib], [build without zlib @<:@default enabled@:>@])],
,
[with_zlib="yes"]
)
AC_ARG_WITH(
[math],
- [AS_HELP_STRING([--with-math], [build with math])],
+ [AS_HELP_STRING([--without-math], [build without math @<:@default enabled@:>@])],
,
[with_math="yes"]
)
AC_ARG_WITH(
[user],
- [AS_HELP_STRING([--with-user], [use this user to drop privilege])],
+ [AS_HELP_STRING([--with-user], [use this user to drop privilege @<:@default nobody@:>@])],
,
[with_user="nobody"]
)
@@ -103,7 +109,7 @@ AC_ARG_ENABLE(
)
AC_ARG_ENABLE(
[lto],
- [AS_HELP_STRING([--disable-lto], [Link Time Optimizations @<:@default enabled@:>@])],
+ [AS_HELP_STRING([--disable-lto], [Link Time Optimizations @<:@default autodetect@:>@])],
,
[enable_lto="detect"]
)
@@ -121,7 +127,6 @@ AX_GCC_FUNC_ATTRIBUTE([noreturn])
AX_GCC_FUNC_ATTRIBUTE([format])
AX_GCC_FUNC_ATTRIBUTE([warn_unused_result])
-AC_CHECK_FUNCS_ONCE(accept4)
AC_CHECK_TYPES([struct timespec, clockid_t], [], [], [[#include <time.h>]])
AC_SEARCH_LIBS([clock_gettime], [rt posix4])
AC_CHECK_FUNCS([clock_gettime])
@@ -148,6 +153,9 @@ AC_HEADER_RESOLV
AC_CHECK_HEADERS_ONCE([sys/prctl.h])
+if test "${enable_accept4}" != "no"; then
+ AC_CHECK_FUNCS_ONCE(accept4)
+fi
# -----------------------------------------------------------------------------
# operating system detection
@@ -426,6 +434,21 @@ AM_CONDITIONAL([ENABLE_PLUGIN_NFACCT], [test "${enable_plugin_nfacct}" = "yes"])
# -----------------------------------------------------------------------------
+# check for setns() - cgroup-network
+
+AC_CHECK_FUNC([setns])
+AC_MSG_CHECKING([if cgroup-network can be enabled])
+if test "$ac_cv_func_setns" = "yes" ; then
+ have_setns="yes"
+  AC_DEFINE([HAVE_SETNS], [1], [Define to 1 if you have the setns() function])
+else
+ have_setns="no"
+fi
+AC_MSG_RESULT([${have_setns}])
+AM_CONDITIONAL([ENABLE_PLUGIN_CGROUP_NETWORK], [test "${have_setns}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
# Link-Time-Optimization
if test "${enable_lto}" != "no"; then
@@ -492,7 +515,10 @@ AC_CONFIG_FILES([
src/Makefile
system/Makefile
web/Makefile
+ diagrams/Makefile
+ makeself/Makefile
contrib/Makefile
+ tests/Makefile
])
AC_OUTPUT
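
The new --disable-accept4 switch exists because a libc can declare accept4() while the running (often older or emulated) kernel rejects the syscall. A sketch of the usual HAVE_ACCEPT4 fallback in C (illustrative, not netdata's actual code):

    #define _GNU_SOURCE
    #include <sys/socket.h>     /* accept(), accept4(), SOCK_CLOEXEC */
    #include <fcntl.h>          /* fcntl(), FD_CLOEXEC */

    /* hypothetical wrapper: accept a connection with close-on-exec set */
    static int accept_cloexec(int listener, struct sockaddr *addr, socklen_t *len) {
    #ifdef HAVE_ACCEPT4
        return accept4(listener, addr, len, SOCK_CLOEXEC);
    #else
        int fd = accept(listener, addr, len);
        if (fd != -1) fcntl(fd, F_SETFD, FD_CLOEXEC);
        return fd;
    #endif
    }
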
diff --git a/contrib/debian/changelog b/contrib/debian/changelog
index bfb070549..479a20208 100644
--- a/contrib/debian/changelog
+++ b/contrib/debian/changelog
@@ -1,3 +1,3 @@
-netdata (1.7.0) UNRELEASED; urgency=medium
+netdata (1.8.0) UNRELEASED; urgency=medium
* Latest release
- -- Netdata Team <> Sun, 16 Jul 2017 19:28:33 +0000
+ -- Netdata Team <> Sun, 17 Sep 2017 17:00:08 +0000
diff --git a/coverity-scan.sh b/coverity-scan.sh
index 46a0c809e..770a36843 100755
--- a/coverity-scan.sh
+++ b/coverity-scan.sh
@@ -35,9 +35,11 @@ make clean || exit 1
"${covbuild}" --dir cov-int make -j${cpus} || exit 1
+echo >&2 "Compressing data..."
tar czvf netdata-coverity-analysis.tgz cov-int || exit 1
-curl --form token="${token}" \
+echo >&2 "Sending analysis..."
+curl --progress-bar --form token="${token}" \
--form email=costa@tsaousis.gr \
--form file=@netdata-coverity-analysis.tgz \
--form version="${version}" \
diff --git a/diagrams/Makefile.am b/diagrams/Makefile.am
new file mode 100644
index 000000000..420bd5246
--- /dev/null
+++ b/diagrams/Makefile.am
@@ -0,0 +1,13 @@
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ config.puml \
+ registry.puml \
+ netdata-for-ephemeral-nodes.xml \
+ netdata-proxies-example.xml \
+ netdata-overview.xml \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ $(NULL)
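
In automake terms, dist_noinst_DATA and dist_noinst_SCRIPTS mark these diagram sources as files that ship in the release tarball but are never installed on the target system; the trailing $(NULL) is a common idiom that lets every real entry keep its trailing backslash, so entries can be reordered or appended without touching their neighbours.
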
diff --git a/diagrams/Makefile.in b/diagrams/Makefile.in
new file mode 100644
index 000000000..46fbd54b5
--- /dev/null
+++ b/diagrams/Makefile.in
@@ -0,0 +1,467 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = diagrams
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
+ $(top_srcdir)/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
+ $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ config.puml \
+ registry.puml \
+ netdata-for-ephemeral-nodes.xml \
+ netdata-proxies-example.xml \
+ netdata-overview.xml \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu diagrams/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu diagrams/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
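
The 450-plus lines above are not hand-written: Makefile.in is produced by automake 1.14.1 from the 13-line Makefile.am that precedes it, and is committed so that users building from the tarball need no autotools installed. The file even carries its own regeneration rule (the `$(AUTOMAKE) --gnu diagrams/Makefile` recipe near the top), so edits belong in Makefile.am, never here.
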
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
index d8a0ab1bf..d06950b47 100644
--- a/diagrams/netdata-overview.xml
+++ b/diagrams/netdata-overview.xml
@@ -1 +1 @@
-<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0" version="6.8.10" editor="www.draw.io" type="github"><diagram name="Page-1" id="6533187f-1b6b-8515-a257-c05e29b4d991">7X1Jc9tI2uZvmYMiqg5Q5L4cvZSqKuJzt6ftnp4+VSQSCQlVJMEGSMvqw/z2yQQBCkuSBKnkIsp2hE2CJJZ8n3z35QZ/mH7/tVDzh095YiY3CCTfb/DHG4QQ4MD+5448rY5ACNDqyH2RJfWx5wNfsv+a+mD9w/tllpiy88VFnk8W2bx7UOezmdGLzjFVFPlj92tpPuleda7uzeDAF60mw6P/ypLFQ30UA/D8wW8mu39YNI/cfBIr/dd9kS9n9QVvEE6rP6uPp6o5Wf398kEl+WPrEP7lBn8o8nyxejX9/sFM3Oo267b63d2GT9c3XpjZwvODf5am+Hv8p1szBCYqtoSrb9P9bJLN/lq9f1gs3FK/cz9Ed/fZ4mEZ3+p8at+kWWEecnvRu5lZJGqh7Kt4ksf2v6kqF6awLyxd0tvEvpg/LR7yWfWyMIsit49bmFv38eqKzX1TuX729TOVi6eGHvZnc/cym1aEe1/9/66cr2gP7BHVvEmz78ae9P03UywyS9H/cQ/5OS+zRZbP7OdxvljY53j+wrtJdu8+WOTz5sz2XXcB9CRfJrerZVjaJbQPsLALXK+IKkuzKO0LxBjCgFSvKBKMYvtSYKEkISwiOBYRhIZHkpo4QkgLHDMqONO389m9vfiQeDU93b2a761D9Ur9avKpXdYn+5X6U0bZ6ifN1qMQrw48toAs6y89tDBMahCoeu/cr8/9DB9H4xV5mrctQB0TYI/ZX9nzgSjNi+j3/GsPQ2wvDO0ESHX379f7+UM+yYvqVM2O3oShARaHcA0KM8AczABHXCS6DTMF0ghDhBICFJI8DgczCBsG1uBMcDTAmUBDmAl2+TCbmrK09ImzIolm+SJLLY0dOsoe4ISBcSzjOCWKUB5HBL5VBPIVAjmmCLQQKAAEEbX8RnOaxkYGRCDFsANALIb4s9/y8DkSAoADySvqS31Tk6Vpg7EFgMeHbGG+zJV27x+t7mSX4mExddiF9mVpBeRfpkXlD/wj/MURK80mk+b4LJ858NwXKsns8rW//uHO/nFft1SrlSrL/S0canBo+30rn/H7RJUPDhfVZeemyOwKmMLdWeboUwEkd+8WbnWxe1shsPqNe7dWW9yb+4kFRv3aAiXTzZeq52kUKFxf97Na2GvNqosjAMOgAXelHkVDMEDIoQcNzcE94HAk5rMojGkrUc+6E54/zNP5dAf34ftxn1eiQ1UvBWBJgpVpCzegk8hoKg03EiexCMhaeqKNED5EE+IeMOHjyjY2WdS7276+X1RPuzoWNwfeff60PlasD87nk1qE2VN+sreU6bL5mlPf+6dzCPVcJcm+NYfKhVqUiVvpIl/k2kF5/dPW1zy//Onrh8/2d5ZpIfDPj59/3vTDEIK8ucut+wZv1xvtAWt2mhXTq3ZQhWT7fl92zrQwcdpj5/Z4ooxIdY9vEx/f3sb0AwBfyC7ypSBDmQo8MnWtDR4X+QOcrkDaohb7zzJvPojKainfOSnD5t+fP2zO8sVyXDV1Em877H07pAXVjxZysbIIQeAfprXRjgfr1mWiv1vyfsvMYxfkOIWMpQRDCKmCOBJiK8b3RbJAMWbMg2RqRELGIDmEDbJ2utRwZQx44OqxQSS/YBtkixrwTRWzrHyItNIPZjvB+X4OlesxRSCs9AXNERCqrS9gTiJIUgSNICmGAX0uEPZsEUggHSBRePhmc+y1ATFV2QTFarYDg2/VHIZohcGYo4S0MYg0imIGY82g4LFhITEIeReDCLIBBqUHg1ycQnR7dMLDpTnxSfN0biX57XyyvM9mBwj0nXprc6icO9wfepd2ez3mxV/VIlko6afW9VdnPsttTfNZtsiLri405n5CqDMV5aLPNeW2cZSVIXaoGtNWStBQebEcBGntU3cSFju/epCNSii4xRgTyYAVSbS7ZwGAQ0uzCYq1Ny07ib59gk1rZUw2n2YXv28rvXVeZJWa/+Cw+2Pj3j1fPPr986ffd9jZ6EU2yEVsXojkRW/YNuG/5u5ys3zx4LS3rdhoAWNN41NbrzifVyGWKrZn7zkqzSxxyJrlyQ5bB0K0FVnhPDiScaw8dq+B1vLlJ7N7ad/b7fFPUg8Ij+6mCQGPf5aO6n+rI72P9lfR58Ld5oNZljuAALYD4bW6wEVlTkhugGlH16RUMEpBrFNtbd1E43DmhAV6B2KEejyBxGNN0EsO726xaM33bLodXHy77vlasSVX2BKYG9HGlkWVVbdJzBLB01jTgKYq6JuquHHStdDFPfEVemRT9VjgSvJvRlu1bSu+2Hb16JXiy+lCFl8xgyJu56YIjtMoxkhhJiSIcRrSFQJoH19DdxwEvhyoIwfwXgSwjUl2TkNavVBTk1TZdbfTZAfa9uNmV+Sbq4Spc9lK0vEP6xhFqSE6Saw+mZqQ/mGAekl5yJOU18TaOv7hILkqp2d4ar47QsHoG0VgRTsBMU0IbYtcQaCMlFunRFOFEhAwowH1GCIdRspgw/w6zuEgAYpBstTAr1fvjxckTwVYJd4z7tcu9SMY97uX5OX5ZAGWhPWXBJxzScglLIndy5e0JvgS1oTA/po0jrKzrAm6iDUB/TWB51wTeAlrguVg75xzTcBFrInorQmQZ1wTfBGSmMH+kngMuJMtyUVIYtQvuAByGN8/2prAl4uZfsCGur8+Jzur/gQCEumumqBDWU2px9UE8f6rNi5xd3ekzz6zL9LXpO2CZJ1wODZbd1ys8bF+fncPs7yYqsnwLvYO+1Lfs/yUF/dqZr+UOIy5W3rIS2cs2SUF+kEV7jVQM/dxkk3NrHShoZ93PPAJI5+fzDQvnqJ/mP8s7ZfsDS56oQnUguWLYp2ieV//2reVUsM2xD65jAE4DgPyZVty7uE/8iRZ8R60vyw7eJDxsEpRKJ2DY+/M+T4Gj3KD5rsrrHG7FtT3al+9+/z7Aff7kpv15ilYkLlfp46tLFb7PP/m3EZgns1NuefyhdjCv9SrVSce9UOLofavJ1p8GtE3RvIJz3Y9RPKN8fn4jPc9kSVGbYNs1mwDpqaOMrO4nHfBNNgG6wMrjACdTyb2UVYFM6ffQt79/pOLhTu6JEmUzyaO1EprU7pdvnDJHgvncl1rByOk5WBPXbg214M0Zz4NmHkwzfdP+A+lzI3k3f+TzZbfw6DsZQllng1wCXluP01VtsK8izQUuf55z/sKITJ+n20TGdvjGScwmnt1i4J5/JK+sHqY3LQBw/d5Eo6QV+rdX3eFMe+/fNzNAs+VT3rx26x8KvVicsguOynoBea7QX8srwg/I8I/Kf33Lz/wfe34lvSM+JYefPdWoJ0t2+4Jse4wUWUm9bNg+4H4aZYk7oz
rBN3p93vXWux21cwLrf53p3XLAG6pu17VbwzKW8orpBQ1zlyHjMItzftZvtAP9S3sny7uqbVO00SG0mUx7iYpCTxMCfF5Uw6wzobe3M22WIV2z+5y9ntU09Ftr5qUGxVb32YKzAHX6hl4tvieHR+HMMY9GdOIZZqYdOHhQsVyNqtKNhwZneFWGJWU1ZOUmUt3B/XDbe2O0NqGlXulw3sKY9dPxdUX3AaY55lzUtpv0/c39KPblctFXtZeijGbdJUtU66axHx1bz5G5GaY8x5CqpNu3oHEQ6633gmdBL7wXvuQotzv2W+6VFyqLP9pmcxdvpeen8Po8vXwgC+ytTa139jcsCMApCHgB0ry9cFLba7hBc0qinN7CKzDAzi22sizcDgHin81M1OoSfT7LM2jKHpenU7QaHvB/J4uuZNUxNF+pF4cL1IfJioUGOdN8vXFMu/qBv8sLwr+zaJ1wP+ykOk5wI/6aW9A+DzSV4z+55zvi8X/6hbPBP/NOfId6L8s2ngO6GM+gP7xstZeos0UZpovWhaVW1pT2Afd2gCvhYDnbBtQtNt7faiItO4gBp7zc6qEFvvBZJFN10k6z9346rh31Qbv7FXWhdEm++avs0bbG+O9hiA44/CWS3tKQATkvKes+LJPCfJgVhwnQuJ1IJ/Hwfbncjr/ffau7WaD1C7dTe1nQ2zlZ6u+/5tR7sqUesjsc8Rt6hK3ua+cx/kmeeJMqDBaa497Se7xLwCfLRbCrYrEhZMdrd2rGL2E7JNs/tsGISW08QupWFBCA6WsQdALicrRNjcIQGemGbNAR4xQRTWAUeOxOkvASLu6uPmBntHzBkF4L/FDehpeCF+9+P6ZhyOIOKKS4kdU5FBKg/NFRYaURj8ofTRKi35g86yUHlHe8IPSB1Ja9h3fZ6X0iBrSH5Q+WN+Sl0TqEXVbr4HU4pZgH7V1goSX2k3Rh5fainY++9i6sFHlIgwQ+skNnJwTByNKGn/g4Cg4YPCScHAl6UwQvD4gQNCMB1sjYdhQ7mRIGFP4/RqQQG+RHALh/O41MsK9djpiX4m9Li6U2P1+KGOJfUgz0vMHPNV8Xl58s2pXH1PXhVXxx0vIvD5ro+p3lmqjGsyj7WNTXkOPatTvsXLiHtUjOPKV+NUuVfxieD7xO2ieciW0vlTp2++odEzpu5vYmwuiXiY8BsL8f+dfdsjZHX3uAyUXLbT9TLv5p+agxKKzFoh6w2gerIQIow16b11ODPwqZQDsV1AcTwacvr3s9Kn8z2S7Ikevs5u23Yl3AiQyJt052ylPI2GI1lqqhMCA3bRZ47Jb9y5maKhONn1Cu3ULr7R7cfmfpYXVdnjtN17xtcBrNWvZUIpFe9ayYARE9umVxgkHKQ3ZrH0wDID42mQ2Jkz43sSnh1dhkmzHIJOVJXZ18GIVvFIJUNppfO3GIrIEk5QkqVYhxyIiILrowr5mvdLXFOOSRwFsGxWfl4v7wuwCGNoLYNfTfN0FpBwGWcxwB4NMsSgRKdAxZSDVAZuvr+fkrCUoGtZIIewbzSlfJwaNNYgsrUujCqskbwUiv0JOJ6siOPsyiTW3sCLY8rcVyoxV2xIIkhiKGKU84JAJInucjuBhhG2NvBc2x7oIlLnZkc6I2jUVjL9ZXscdr4NAppR1ZjulxL4iBqdYJhqHVOdEvx6UQjGUt9w3eueVanMWPUX+/WkHBLcXeF0vBKt2bQIiSpTszjphbho7J0phQVOWhINgP1ncMsJhUeZavbsGlS+bp7tYIHmr+IMr/AlIuw4TzeNIwlTrWGgmYUCTozecE1KfxYE89uwBbtaLgF/+bT77w3XpWJZ/TPL7HUjcb+rTq9D3rKSVNZuDKew4TlKpI0mFNTENJLGRASd09osZsae2CWHhkbSvFGfOsrVE34Gv7TWrrxJf0j6cfZkgps3Kc9LYEzJOVAQSRmViJEoJDIivpmHz2mptIo3taJFHkTt2ffex4GWJqdWOGZ18v6jCFcnRFYOTmqC4K0eTNNJSImgSDlHMwwGwn8Vg9bhh8jDknqmJ7JUatLP7bLaDvbH92NsVAbCKfEGsMYpR25al2hoSFCtFTZok1AT02/UHM2Hu6e7iqzs9oAvd6QC4cYzsc0+qu3zuRq7My2qW7HZEiit04Fm4VaEKyFFsMGvzu1jTSMXcyceUWziGjIThvuXgmz/p9Z3A4+LtKPXojuuYWXJIQXorjebTuuvKu0I/VJ1MjplIWh+I4vXNb90bL8om3dwYZVMrlX4nzkFuTACY8t4sWeprnYE9TJEfmSmeqlvnu4kqpi9vo/Cb6w/kmhJ+aiVGh4DoQ3XiaNo77SaMyhf1HNzcMnNTk81TYJT2ugicGKM7k99kvYVayW8fV62p/mGSZd0uHb2MEOtEs02paacgRH9mmJcQ3BeTCl8bElh8/fb16+eb/myg3awgxA7/xy9fvkb2wtE3uGNvv6iv3GVCqufdp94c1jCdoc+vI90MRliBcYf+li+ytG7/tp+sCgFQ5URkNOvew1agbo9PvEohxEjXQUvI0H8GfWGoMPH43ULoctojeLuQ3TQtyFxN86D/2OYSqk1FV32at7uT+TK8Q0Cgl3BPpMeg87aZC1obeXrDR75NN5WsegbeUaQwsFLIOepJ7TegmkWxEBhBRWTCAjrqRdPXoAYZ8kzFQ954oziuRDwD7PhesHstvqgqKxuSNIFKtV2fENGIGqqxZpwrEzCNRzYlmjWmsPDo7cyjt9NLzuI5DFNvNeTjKjct7DSSaaI7LtDURKm2rCyBMOYoYOoOBLKHu4bmndwJX6fH6+Nl+1WYXA/uUJWhDRmQqenmUmAQgTROEmS/Q2MRUIT2YYc9sGu+0zEqg1Q47VTV1zriW9XVQ9C4F07GHleUtyvCOjM/ZPnruqDpB0VfIC363kVPBpTwGNgQ7k/RSxINELzVdGK8CssaKDjsZAGQWEaYGwV1yjRWAdM5B53LEBvWVUDssa8IO5FwuJwpAqFbmp6/jp72VAPmmUR4ql4qEFxJ0wRw6xvDd3Zai14Cxnlp/Uq7VdY9Iy96U8teZhfjw8xCbz1mOK/sWgU41fiHwtxnZXWvx+ta97LWetOn6HlG09TMlrtvNXh+z/SpUcXcDUTZwkx36GJwe/7bq8zx6cdYfaaSt6kUP5HKAy/Helo85v9ST+96LBI1LBLB2zCx9DaPLfNlqBCV7AYPKBmSmnkoTY8h8uD1TmE4v8zDqNvHh9LTNWAeUvp6pzCcn9JU0LNRmgoD41jGcUoUoTyuG6q+fkpfav9H1uXf0lOicLT2jwNie+eb9YjdUEcvi8nT+8I5vJz6tkNPqvxSFRb8WtM7Cam3J37tvvJQ4e7uDt3JMRrVkURvv1UC9GSH8Gb/dDzSR0yO/PK3T5+32QItjbtannmRlWa0oWCV68e8+OsoGvzGupZm2PRdOZvOq4qW22nS1eqHSN6eFW0PZPOyzWwqh+ZuHB8hVc0HbXAn7+4C2QI9lCJfOTv1spjTGAMXNLvryqdp9htrQI9/DGIfFEKYC8M9igaEf7BrOqk63/
cAYB9x0ZMopsz+q+LqC26B5nk2W1S3R9/f0I+OSstFXtZEGQOPVUiktHs/m91/dW8+Rj5LLgQpBuNU2LDNjk/uH0fsD3dgucgLNzF7nZq8+tcjEtK8eFRFMrJJ8pXRUcKu+sbksFW/z/wO2qW33Qi7pknhpCxQVvl9cLJ0bFPtnlPzGA66AGPNh/jdnn0eTthLxrHyOH0MtIYEHyPsg/COXmtoCDzZfRtE+nHqo4YUGZGbbg2Ib2uLwMySWux+tHqg/utm41wOstJci6f/6xjGrSC8OfDv6gCq0hiqA59NkdnHcwtfsZZyoYpFc5nc3ltz7C6bTJ5vpPVurV88w+Ff9Rr7rJXGxzsAzgf+Ef7SKCyf1cLe06w6KwL1RevHc9ka5nu2qB+ufvfvzrv+Y23ElLV1Cm38WheENUXs89+bxSY61p4Hk9ybrdhsIc8HvOZYYSZ2638zndv0gbG+wmcnDFrQR73CQAZ751g9cv2zZ0wPz4R7U4+Y7FVtrNZlcCYLH/XU+lotrrbccv9C9fuNd9avuRN8zx9YftD5hX2xuuvn7b2m3OlTZcqJ0n9tqy0agnC/rPBryZaR2JludybWsaZJVYzA6mwZQFmUpEIaoCEwTAVsfga6WForoZ2iJ09TqgMshJMn8M6X5UO8nEzMYj/8vdlM3lX/UQGxSTvZWpjDSCapTmOYIp4GzOQlvXE4GA8VakR9LVsuuSlajb8kK3VeJHuBj1xnexZRIUtShQRp54hTgSIjiaE4NTHUIdtRNWMvGs4GPZzNl236KjibpU2RLBdP+2ELXSO28KplrcIG43a5lZAKRBRrmVjBmaIkYI4p5j2LrOmx0uZaTcz0lI1/QmBrYSbmvlBb68GH0Novw/lVQMsqZK6LnjUwCeSVQNS8hhbALJJKUQssAxigAaElu20cfQqZr4VZ87OLRtZjNsny/XC1XyPkV4Ery7JWXY41SGXc7lYmgRAR55Zh0ZQCTIJ2Oe6mkyDgyYr3uS5JkEq93S6kRuxealhoHRM6cXuDcDPdGe+mS3ttPW93A3mMKEQTTm8R/N/5ckDzgBGEiUkXh8UPEGje1zcGA9ADNgGxtYY6HDNDThUToiMyvPrctkUT/5qGMI1HsPSwvHlVva+VMrDDm7WbyUFjJmNoJFJxzZtH57Z8+HB3VwX5PQkA68/6mDtKk69eIgtrtNk27nx9yPeXBCNwNyJysJnXbwzezPKZafDVAt5AJuypBfQlxrTUytyukKbm2R9TNbNQnFa06dMfAC4/shPRuHGZNCmFngJpX8HEEeKUVUTRU2Lg+HpUP7qrMSiqu9g9SXjvppDe2oVh5DNAccXhT1lm0/nE3NovmDR1ov9b9UY9mtIS4H8d9dqDFe6y0ecqj9ssP/nCdKPdLx5HXTRHfvrws/32h3z+VF8YoGqw5V4DoS/oCT/kZVWG87VU+bLMytf7JD/tuPWXnV1X60TAol6n2/vikDHgmyzh3mbZatrS7T23LkfvDSCOJOuFLKmgt4wxTgQgTHDPfEWfLQrFifIZqK9Ee0+5463qe7AP4/Bjvhl3A6lRi2WVeGW1Ufdcs8T96zpjL+ztrj7K05sqMavLpYcSzMeRfVLNs/PbuF88VBedm1lUJxSgD27TGzWJFtm0fjs3RZoXVt9xn9f3/dC0SG73Mt57b11e9lioXYBlP3A/gD0VW7IpwireI2rTW1k6tULdIkUnV8af57I5L8aTWzN2vS8kNwX3QwD9WsrRqSmDfI7BqTbkptzsTO0YAYMR9T6H21+bTKBj22Xqv5Z13q740R8tbnQi4wv2Crrk0NdKfNbX/gn4uwnMRvhV34ZjZzWRRksao04MGogkYhQqII00WqtX69hhPccOYUMB42t809QshcXdiJLvy3Xs5GmaaXPr4FWu/h3SHQGO3ouT8ZQOaTnw+IrDTJoZQVp0QplBgft7OpJbWn9zRF/9/0eqiunwpj6+I1bBPxHpYc944p6KrkYb6YiTo2zrEdVbb0OcrNpeJkTz7qRaykUEgJYmYUAjKF+tOBn062N8qMd4vcgBGjAOgTcuUHD9wCMV8BCnyjX2bQWoEgQjDJiKBSEpj19vgEp2UxUkGHZ7Yh415jj8boR99MNMbl2hZ932ypE5vaUcEgghRm7+cvd0403mvjq09bRHNJ+5z7raXGcHnGceKJ8/r+0zA5ZdVH67oWuv8i0+uTwJkM3KhVrdnlsbN4MKrN7bzwsH0adyYaYrhjO+M1hnNE3kTrOcz/PC8aLhXTtvW5SYRZ2u4ndGds64chUOTvRfU+T2uKv2z+6XhRp7ujQvKodjWblP5+tiw3LtlWxWYdMSDJ438z5py/+5iSzGsji3s6bVjMPmTBVdaoq4/R/nVfmsJZ/9589ldeMwKo19dHf4J51PJs1ygoVblG9ZubR89r/Vg/08kmr+p0hdNtHGB1gtpoNn6qo0weIhX5arVZyu5zbOTYWt+n4/NPCbGeP64K6f6MbtQvDh8z9vag+2O21p4T0xFZ2L8V0ofI9X3VV/jFLvYY3T6LOtO6lz688P/ul9c9f/ePdpjaSZI0aSlX/Z/3635/q7+2Sx3nWPD1n1bNU5i+VsM+RGPeCkzFvLW4X0djzuNDZJUjnHvRvngw8Q5vvCjY7d+qMxa92JF+zetj/9+vl/HLKtZH//8z7r1Ivjjv7p7e1tTcQ1L16RaLVrHx9MYbYwyuMFJg4PzkERRt0aeLKlpydRqHDc6ad0P5r4j0nem6qJ3mqNF6m8oHY3avtB23owgkZQo9RqN4nCDAfsyC26+jxcx8E6rfyRB2D9ct+LymrfgjmrzN3nSdzFHN6vtOt6MIer6i+Lg0QkHYvVeU5wzBS1AhjCkOnuRKAe5qjHaQY8HtMGrK8NcrMy6cFte0usa4WbtAagfWkooETFriBM09ozp6mIeMKgMkwrHbLYUMhuviVEaOifh77ecGHGnA6bbo3wljk6zsc/fGWczewlay3npo5bb1wU3itlgrhxpnT6lnjK5A6YSnb6DZeV+o/kQc/72w69yW1nNQtS+yXtDuwkzkMcR8QwA2KSUEPjjdtuDdrd+66BlK8RjgdQ+DhFTN5WdiFm1n/87UO7q2M3i+sSbZHnYNjmApk9yNuw1aGeGKgR7unZRWnN7LzoTwp6m61OrEooK2aBNNIEdFRCSiPEBREgVjw1AWU07E2GgRQO4wqQegIL5JKHZG5so6ofVLEoVx1Vl4uqj2oXfDtaTRxbPcC0px5QMPQ7eM3Cptr+yuiB95tc+jpqnWVVQ4/jFMUaVGpB2jgcqIq4AURZBUFovHmnNyryiK3ejmmdHDh/6zZLbvkP//n5y2hH5dc8n2z0HJ8Lmdu9ZRehgUC8WwPZA0nfu4h5umlzuh3dPsPYdGfkT2qul/My8cmM/ZwKr4NHMVK1kEFYE4J0NzUURpTHlAojFYpBQAcV7+bWWMUDDaWfrz/RkWbYNqfdJ7/mevTRqvWeQIQJCbvJwVBEKgUwThjXsULhEGBlYxcBXAwR4BtizPf3UA6pfUA21RVRG1XUpgzFnSCIo
CmNMNQ85cAAu/ED9vURrE9tX5RtSO3GZ/YiarP9qf1aWHfVsxAlMTW8w7oxN1GMEsNVEseCh+xZ2OvlDrmngYLw+HoPmLY2JOWI4bGvlZTVsGAkNUyIaJMScRpJSTQXVMM4TQN2ReK90CT3uZx8kyUDpLiSERV418qDZdWP+S4mGtjXut3sllMYxYylKiGcWfMwoAeI96I0wkNt6VG5eIDOJ00T97dIbbu3V/6+lIGUdZo0JpZNKxULbJUvKUjAtAPRVLiu97YcxuSkZ2+z/c23YaLoAVV310NtWtlTGMJYkg61ieXkDEhmgIo5QAH7Jg46DPioDaFHKh8QbBySe1yx27WSuzKeMDI0he38DmHtpQhKbThKJUZxQFZOBtq0Z297AoEHTIQeEhu9ZWKTVTJPYqiOOwo3gWmkLPeMlbCfURlQbhPep/ZQbvsccSHkNj2g4O16qE2roD6WVBnT9ovIGMSR4KlBSlMBoQ5HbcbIbmr7tLQDslGH1H7TXjBalZZhJSlJujWNVidPY20sq09lQgJaYJz1LTDP8HJfw2MewC/ibfPzdqjNVtQ2KVUdL5hj7JRgA2KOpRXpAXXy/t6Wnp6n0OcGO2Dew3GbWdcHqglmv+dfVyd9rhNrLcPVhElo5TbF1OVOgq4wIBEwmNLUEBaTkGo97rZSgaJxvnXCJB4vOUF7A8YTJjlD6vhT+Z9JF03sKmeC0MolgHWqEtFx9ymMI4CtlAExhloFVC1Eb3wjRB5h422BLfdnP2OCbiObpFyrABIrBGiiWYefWMRHJmEJTQGOYUpCzhvqCSDhGcDOfMplgKDbyL4p10rtKlKDYxkb3akCoTqJIE+VxAnmKmQViEBdF6CUnqm7HtnBAgRqxjRK8UzB7LQL6LURGHYOcPkud2qaTdwD/mYm34wj9IbEmE4jNvenlXezbt+wTy+CznzNuvNBe76m+1V3vubzqEt60xp1uY3a7aGWzQZqz7CsSXUh7Q4E7bkhAerNmRzb40D2JvNJfpyBlf07lrVRdOAwyeE+GGFRDxto7LcNDmygUe2M1mxZetOeLNuCK2yD9ZbTPvabzdsfLjsK1NSDaXRRoOZ93zqEB4JawP72YONQfQjwRqS4nGigjGqPkrE4q5DslgTKW+rmGbcHvszywtH0/Sxf6If6FnqdehJqREJ8wBcoxszVYnh6+EieAM7DSFaMum4bhoeKdKBSjSFhrzebhVXuVkIh6CYmSQniiMUaSEONYiSgc33tNn0OintCKb5slgCtBdm1ZrNI7IpL7jiIjQttt/MbLFUjpIHRhGnNRcCMwn6YRHjr7D2+1EZGv8iX+qYzClnlGiOIc4m7o3MRipIYGAJpIhMQ0JTtj/OypuywPd/RTNk3nLtkqV2ZskSoNO1mqlGjIxhLQAwmiLCAnnMs4W5q+5IOZQg2/aZzl9gqmyXlMBYdxwUiIEoJwYQDJZUKKJQpH1DbI5Q9nosQe9vb0+7tULtKZiFWPmPKupUAMEoFYoBiqVOUhIyB9jPV0NBP5R3OGaLTKn/TPmiGV4I75Rq2WblMUxgpzlKoTZpgBAPGtAgdQW7s2d2YByA3etPkZisDK+aAq37IgVFJklimIoYBtXLWdwmyJpjUCTp56s5D1AvwEe63KyY3rckNUt1JVpOU2n8AwRQRI0J2O+O0r5Z7rDDpM8JCUPtaXSOWVG4VrMBDIlWOUWvY2NNxEiXAvkxMIpEIKJchGPhGwEhOfUDLiF2DNMd2lIHC11Fm1QW3OdeyOZy0Ks2Xnqaaw84zL82bee5zHK3uKYncqIVMm15vFL69N8qrKTrfn4P0Y9SgUdHapl6YwdBjkhT4QZPCzhLBIQh2Yji3UMKxwZrqV59Nkdk1c27+PcKSmjGREMQIVVQD14V/9aV2TKcJ311ITEfgrrNQNkTdO6TDe9os6I8TChfS4W/YD3GWijkO+vG6poytjVJPDRUJoMyIA/wQr0KZOUucZ6CXAjyUKtTjLj6gd96QlAf4GF4FKc8S5xlYlKBZzh15TSEmpDS5tq9AGxjkc4RL2hgl8hsGdikivzeWUsqevjg+i6PPShpQhBf5Yr80unPCjZ8bb416dCF469sxB+ONi75B1FcvAuLt9aSrnR1vjVS/FLzhvlgEB6apcYJ7Z6JHS1MTPpddx+Oj10B5dupgwxKQmKGf53m8SaLMtDND6fK7CPtSlX9h7q+7n3yhav1ObjXN93DwwaYAo5nUzDyueRjG1bLLv+dpC2kXazbK5Ud8Lr/BjJ71JBTvfJ7m2Oqie47XCXPHrRlbwN7sTTVsKs2LqZp1xgr5bnH3TKAQ3suvDyaqrBClJ6aM8jRSkb3TaDUtNrKrG6XVFKi2J1Ns92Q+PmQL88XuC/f+sVDz7j7s5X6maYq09kmRhMWupjNMAkLPPQTXwws6Aw08O+OA7iljnJBihBPyRNm8fy6n899n79o5vZDeWuWrTupFbJXUW33/N6PclSntcTp248/6vbhMXzJI/kaemjkfj6Qhui/0By4e0kbpwsfAJjN4m1WDD2eJKWoeVLMdd9x5HYw9OjVWeYnULIniZZnNTFlG7mPLe6xEuYMuieDOOS6OPfcVEt82SifZ/LddKuYePo4O6MjQVQWlB3Kwb2Ic5OHYA2J6WUye3hcWGaaSsNtZeQWetZ0wmkBrjHkIZKmD7uQYArW5jVWEK24TgFCipyuvc5Lag5a4J959gAo1IJU8wEV8LQEBUSUA3wkKaZyYTrsOwVSkCUsYUJLBJKAXWfYbLxFPQNwXm+zX+bxAYbYG7SxZzVBtzVPdpt1N8lmSz26nT1Gt0N1mefVJW0tbeSs2amlXCyPpNLs7HRtBiKnyW00NIyt6Iy6kTCBBGqqmrUOf04RQMrpmGPKUoMDmrB1+37fwQ3d52NOqYaPssDVqb/8sDzDGPGbX6NyQnWbXs/9g1Q9+o/9gT8PwZXe4mm09e3q2Cx3+97u1zl6nL7HIhoK7+uMR3NT9HSOcw+8h0kwhaO2hdRPSTpbhcfrJyxGpaK+0/PI0Jvh6jH3jmwLDgo8jT8PSy9LJspacLbcL2rFulA/ViaOPvdOusfNGx2JJN1Duzsg4gSImLVEsDdYRRxQTFMc60ep4ohj2BlPsIYuPM2RPXo7n53UyEta3pT2DB45Uqi1HmNI/aLdtN0p6NuKtaxJ+UO9A6vW7JFAwDC8dWYR/+T+/2gOxSu5dfjVIi0qeu7NlOpAw/9XMTKEWLqP7fX0d1IEReqPSfFV9IiWOQZUa1oy+EwmNI2yMgpDHcUI3GdbdWCkOZCb0OioS6pHvzCPf0f4lDWPEOwQjMnx+cJmtbR+65aNnMBTCMBI1v62cDrflQ5+FvFHfHGNVk0wSgzSBnXpFA2HEiIk1xZwxHXBSlQC95Jc1UHaUn9Agkd8zdFw1U6204yM90LE3CjoOVqCDiqWd7PQYgwhLCIkEnCIcsM9v1Q22Azo0FEvM4706YDzaHqCzi9bWliwg8HfXXjFz/sinfFk4SOgimy8C6VJWvJjsm1Olmv7S9cUjp7lFVtpNJlHnis9w
3a/n9BXBFVVwTZBJVKcbCxY0YirhIjYk4VpvV7PsWzRQukJ5VTDsymjiKST1BUkPKAoepXTBN930ga+aPiQk7TUephBFNIkBx4YjIwPOOVj/5HnOwRABTcZMxz0fLnAahkWZqcomkdV6s9RS0mGgz4ng9ujKK0vPORBiq84Dqd3CSUeGJoZFzHIjK0eVUuty9efUENA3cu4n9ip9g2dzdrm9sUw359kn07xlu9Rp7fskBG1K2z1GqIvjbkEA8nFT3/SnIKGu02uohYrjbDH9T3+fvdEwCeOr8skUCt5p6yERlvatAgJRpSQKOXAM9RJika+5oq9PZlNTeZGgq3n6fFk+5C6BeStbf6sKJqh4OeAAm6Q7Y1gmEWI8ZRAbSEOOh2e94C9shs51vHSeOTkHdAQbpTCiAzLtrlKwkwoMiFMVp51ugAlyVVJMxYKQlMfxsdNwkdfnGQB7suv/8Y1W9PmHDyguPr0kvc//MN/n31TR422dJXkrvI0zKtyD30lIUBKnKiJS6KaphEARFAllmiVCkpDz02WvogChYVgM+loR0KM6ewKjbpYnpsJcWuSzbNmXpmi/qNipuyE8Pj7e1jdeP/P/czupyBf2oiaJpibJqgIF92sHrPrLrgQq72BlAAwPfDbXIaEeVjAZBsAhREOskCC6vnPJ5fmi9fVfXcTok6Wt+9H/Bw==</diagram></mxfile> \ No newline at end of file
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0" version="6.8.15" editor="www.draw.io" type="github"><diagram name="Page-1" id="6533187f-1b6b-8515-a257-c05e29b4d991">7X1Zk9s2uvZvORddlVywC/ty6SWdpGo84xN7vvnmKgWCYDcTSVRIyXbn1x+AItVcIIlSQ0urbVfZEiVxwfvg3Zcb/G767edCzR8+5ImZ3CCQfLvB728QQoAD+5878rg6AiFAqyP3RZbUx54OfMr+NvXB+of3yywxZeeLizyfLLJ596DOZzOjF51jqijyr92vpfmke9W5ujeDA5+0mgyP/idLFg/1UQzA0we/mOz+YdE8cvNJrPSf90W+nNUXvEE4rf6sPp6q5mT198sHleRfW4fwTzf4XZHni9Wr6bd3ZuJWt1m31e/uNny6vvHCzBaeH/y7NMW/4j/cmiEwUbElXH2b7meTbPbn6v3DYuGW+o37Ibq7zxYPy/hW51P7Js0K85Dbi97NzCJRC2VfxZM8tv9NVbkwhX1h6ZLeJvbF/HHxkM+ql4VZFLl93MLcuo9XV2zum8r1s6+fqVw8NvSwP5u7l9m0Itzb6v835XxFe2CPqOZNmn0z9qRvv5hikVmK/sM95Me8zBZZPrOfx/liYZ/j6QtvJtm9+2CRz5sz23fdBdCTfJncrpZhaZfQPsDCLnC9IqoszaK0LxBjCANSvaJIMIrtS4GFkoSwiOBYRBAaHklq4gghLXDMqOBM385n9/biQ+LV9HT3ar61DtUr9bPJp3ZZH+1X6k/ZaiURaLYeRWx14GsLyLI+9tDCMKlBoOq9c78+9xN8HI1X5GnetgB1TIB9zf7Mng5EaV5Ev+afexhie2FoJ0Cqu3+73s/v8kleVKdqdvQmDA2wOIRrUJgB5mAGOOIi0W2YKZBGGCKUEKCQ5HE4mEHYMLAGZ0LCAc4EGsJMsMuH2dSUpaVPnBVJNMsXWWpp7NBR9gAnDIxjGccpUYTyOCLwtSKQrxDIMUWghUABIIio5Tea0zQ2MiACKYYdAOIGbC382W95+BwJAcCB5BX1pb6oydK0wdgCwNeHbGE+zZV2779a3ckuxcNi6rAL7cvSCsg/TYvK7/h7+JMjVppNJs3xWT5z4LkvVJLZ5Wt//d2d/eO+bqlWK1UQOzjU4ND2+1Y+47eJKh8cLqrLzk2R2RUwhbuzzNGnAkju3i3c6mL3tkJg9Rv3bq22uDf3EwuM+rUFSqabL1XP0yhQuL7uR7Ww15pVF0cAhkEDZh0wUDQEA4QcetDQHNwDDkdiPovCmLYS9aQ74fnDPJ1Pd3Afvh/3eSE6VPVSAJYkWJm2cAM6iYym0nAjcRKLgKylJ9oI4UM0Ie4BEz6ubGOTRb277ev7RfW0q2Nxc+DNxw/rY8X64Hw+qUWYPeUHe0uZLpuvOfW9fzqHUM9VkuxLc6hcqEWZuJUu8kWuHZTXP219zfPLHz6/+2h/Z5kWAv9+//HHTT8MIcibu9y6b/B2vdEesGanWTG9agdVSLbv92XnTAsTpz12bo8nyohU9/g28fHtbUw/APCF7CJfCjKUqcAjU9fa4HGRP8DpCqQtarG/lnnzQVRWS/nGSRk2//b0YXOWT5bjqqmTeNth79shLai+t5CLlUUIAr+Z1kY7Hqxbl4n+Zcn7JTNfuyDHKWQsJRhCSBXEkRBbMb4vkgWKMWMeJFMjEjIGySFskLXTpYYrY8ADV48NIvkF2yBb1IAvqphl5UOklX4w2wnO93OoXI8pAmGlL2iOgFBtfQFzEkGSImgESTEM6HOBsGeLQALpAInCwzebYy8NiKnKJihWsx0YfK3mMEQrDMYcJaSNQaRRFDMYawYFjw0LiUHIuxhEcOj4kx4McnEK0e3RCQ+X5sQnzdO5leS388nyPpsdINB36q3NoXLucH/oXdrt9TUv/qwWyUJJP7auvzrzWW5rms+yRV50daEx9xNCnakoF32sKbeNo6wMsUPVmLZSgobKi+UgSGufupOwmFEWZqMSCm4xxkQyYEUS7e5ZAIZOVNgExdqblp1E3z7BprUyJptPs4vft5XeOi+ySs1/cNj9vnHvni4e/frxw6877Gz0LBvkIjYvRPKiN2yb8J9zd7lZvnhw2ttWbLSAsabxqa1XnM+rEEsV27P3HJVmljhkzfJkh60DIdqKrHAeHMk4Vh6710Br+fKT2b207+32+CepB4RHd9OEgMe/S0f1f9aR3q/2V9HHwt3mg1mWO4AAtgPhpbrARWVOSG6AaUfXpFQwSkGsU21t3UTjcOaEBXoHYoR6PIHEY03QSw7vbrFozbdsuh1cfLvu+VKxJVfYEpgb0caWRZVVt0nMEsHTWNOApirom6q4cdK10MU98RV6ZFP1WOBK8i9GW7VtK77YdvXoheLL6UIWXzGDIm7npgiO0yjGSGEmJIhxGtIVAmgfX0N3HAS+HKgjB/CeBbCNSXZOQ1q9UFOTVNl1t9NkB9r242ZX5JurhKlz2UrS8Q/rGEWpITpJrD6ZmpD+YYC6Chu0kBwAsom1dfzDQXJVTs/w1Hx3hILRV4rAinYCYpoQ2ha5gkAZKbdOiaYKJSBgRgPqMUQ6jJTBhvl1nMNBAhSDZKmBX6/eH89IngqwSrxn3K9d6kcw7ncvyfPzyQIsCesvCTjnkpBLWBK7ly9pTfAlrAmB/TVpHGVnWRN0EWsC+msCz7km8BLWBMvB3jnnmoCLWBPRWxMgz7gm+CIkMYP9JfEYcCdbkouQxKhfcAHkML5/tDWBzxcz/YANdX99TnZW/QkEJNJdNUGHsppSj6sJ4v1XbVzi7u5In31mX6SvSdsFyTrhcGy27rhY49f6+d09zPJiqibDu9g77Et9z/JDXtyrmf1S4jDmbukhL52xZJcU6AdVuNdAzdzHSTY1s9KFhn7c8cAnjHx+MNO8eIx+M38t7ZfsDS56oQnUguWzYp2ieV//2reVUsM2xD65jAE4DgPyZVty7uE/8iRZ8R60Py87eJDxsEpRKJ2DY+/M+T4Gj3KD5psrrHG7FtT3al+9+fjrAff7nJv15ilYkLlfp46tLFb7PP/i3EZgns1NuefyhdjCP9WrVSce9UOLofavJ1p8GtE3RvIJz3Y9RPKN8fn4jPc9kSVGbYNs1mwDpqaOMrO4nHfBNNgG6wMrjACdTyb2UVYFM6ffQt79/oOLhTu6JEmUzyaO1EprU7pdvnDJHgvncl1rByOk5WBPXbg214M0Zz4NmHkwzfdP+A+lzI3k3f/IZstvYVD2vIQyzwa4hDy3H6YqW2HeRRqKXP+4532FEBm/zraJjO3xjBMYzb26RcE8fklfWD1MbtqA4fs8CUfIK/Xur7vCmLef3u9mgefKJ734bVY+lnoxOWSXnRT0AvPdoD+WV4SfEeEflP7Xp+/4vnZ8S3pGfEsPvnsr0M6WbfeEWHeYqDKT+lmw/UD8NEsSd8Z
1gu70271rLXa7auaFVv+707plALfUXa/qNwblLeUVUooaZ65DRuGW5u0sX+iH+hb2Txf31FqnaSJD6bIYd5OUBB6mhPi8KQdYZ0Nv7mZbrEK7Z3c5+z2q6ei2V03KjYqtbzMF5oBr9Qw8WXxPjo9DGOOejGnEMk1MuvBwoWI5m1UlG46MznArjErK6knKzKW7g/rhtnZHaG3Dyr3S4T2Fseun4uoLbgPM88w5Ke236dsb+t7tyuUiL2svxZhNusqWKVdNYj67N+8jcjPMeQ8h1Uk370DiIddb74ROAl94r31IUe737DddKi5Vlv+wTOYu30vPz2F0+Xp4wGfZWpvab2xu2BEA0hDwAyX5+uClNtfwgmYVxbk9BNbhARxbbeRJOJwDxT+bmSnUJPp1luZRFD2tTidotL1gfk+X3Ekq4mg/Ui+OF6kPExUKjPMm+fpimXd1g3+UFwX/ZtE64H9eyPQc4Ef9tDcgfB7pK0b/U873xeJ/dYtngv/mHPkO9J8XbTwH9DEfQP94WWvP0WYKM80XLYvKLa0p7INubYDXQsBTtg0o2u293lVEWncQA0/5OVVCi/1gssim6ySdp258ddy7aoN39irrwmiTffHXWaPtjfFeQhCccXjLpT0lIAJy3lNWfNmnBHkwK44TIfE6kM/jYPtjOZ3/OnvTdrNBapfupvazIbbys1Xf/8Uod2VKPWT2OeI2dYnb3FfO43yTPHEmVBittce9JPf4F4DPFgvhVkXiwsmO1u5VjJ5D9kk2/2WDkBLa+IVULCihgVLWIOiFROVomxsEoDPTjFmgI0aoohrAqPFYnSVgpF1d3PxAz+h5gyC8l/ghPQ0vhK9efP/MwxFEHFFJ8T0qciilwfmiIkNKo++UPhqlRT+weVZKjyhv+E7pAykt+47vs1J6RA3pd0ofrG/JSyL1iLqtl0BqcUuwj9o6QcJL7abow0ttRTufvW9d2KhyEQYI/eQGTs6JgxEljd9xcBQcMHhJOLiSdCYIXh4QIOCoh4RhQ7mTIWFM4fdLQAK9RXIIhPO718gI99rpiH0l9rq4UGL3+6GMJfYhzUjPH/BU83l58c2qXX1MXRdWxR8vIfP6rI2q31iqjWowj7aPTXkJPapRv8fKiXtUj+DIV+JXu1Txi+H5xO+gecqV0PpSpW+/o9Ixpe9uYm8uiHqe8BgI8//NP+2Qszv63AdKLlpo+5l280/NQYlFZy0Q9YbRPFgJEUYb9N66nBj4VcoA2K+gOJ4MOH172elj+ddkuyJHr7Obtt2JdwIkMibdOdspTyNhiNZaqoTAgN20WeOyW/cuZmioTjZ9Qrt1Cy+0e3H519LCaju89huv+FLgtZq1bCjFoj1rWTACIvv0SuOEg5SGbNY+GAZAfG0yGxMmfG/i08OrMEm2Y5DJyhK7OnixCl6pBCjtNL52YxFZgklKklSrkGMRERBddGFfs17pa4pxyaMAto2Kz8vFfWF2AQztBbDrab7uAlIOgyxmuINBpliUiBTomDKQ6oDN19dzctYSFA1rpBD2jeaULxODxhpEltalUYVVkrcCkV8hp5NVEZx9mcSaW1gRbPnbCmXGqm0JBEkMRYxSHnDIBJE9TkfwMMK2Rt4zm2NdBMrc7EhnRO2aCsZfLa/jjtdBIFPKOrOdUmJfEYNTLBONQ6pzognrNiikntFOkPtG77xQbc6ip8i/Pe6A4PYCr+uFYNWuTUBEiZLdWSfMTWPnRCksaMqScBDsJ4tbRjgsylyrd9eg8mXzdBcLJK8Vf3CFPwFp12GieRxJmGodC80kDGhy9Jys7sJDDog89uwBbtaLgF/+ZT773XXpWJa/T/L7HUjcb+rTi9D3rKSVNZuDKew4TlKpI0mFNTENJLGRASd09osZsae2CWHhkbQvFGfOsrVE34Gv7TWrLxJf0j6cfZkgps3Kc9LYEzJOVAQSRmViJEoJDIgv1NPk1nkF7WiRR5E7dn33seBlianVjhmdfL+owhXJ0RWDk5qguCtHkzTSUiJoEg5RzMMBsJ/FYPW4YfIw5J6pieyFGrSz+2y2g72x/djbFQGwinxBrDGKUduWpdoaEhQrRU2aJNQE9Nv1BzNh7unu4qs7PaAL3ekAuHGM7FNPqrt87kauzMtqlux2RIordOBZuFWhCshRbDBr87tY00jF3MnHlFs4hoyE9Q1X6htb7PWdwOPi7Sj16I7rmFlySEF6K43mw7rryptCP1SdTI6ZSFofiOL1zW/dG8/KJt3cGGVTK5V+J85BbkwAmPLeLFnqa52BPUyRH5kpnqpb55uJKqbPb6Pwi+sP5JoSfmglRoeA6EN14mjaO+0mjMpn9Rzc3DJzU5PNU2CU9roInBijO5PfZL2FWslv71etqX4zybJul46eR4h1otmm1LRTEKI/M8xLCO6LSYWvDQksvn75/PnjTX820G5WEGKH//bTp8+RvXD0Be7Y28/qK3eZkOopSdSbwxqmM/T5daSbwQgrMO7QP/NFltbt3/aTVSEAqpyIjGbde9gK1O3xiRcphBjpOmgJ8cQBfGGoMPH43ULoctojeLuQ3TQtyFxN86D/2OYSqk1FV32at7uT+TK8Q0CgFwsi0pN85m0zF7Q28vSGj3ydbipZ9Qy8o0hhYKWQc9ST2m9ANYtiITCCisiEBXTUi6avQQ0y5JmKh7zxRnFciXgG2PG9YPdSfFFVVjYkaQKVars+IaIRNVRjzThXJmAaj2xKNGtMYeHR25lHb6eXnMVzGKZea8jHVW5a2Gkk00R3XKCpiVJtWVkCYcxRwNQdCGQPdw3NO7kTvk6P18fL9qswuR7coSpDGzIgU9PNpcAgAmmcJMh+h8YioAjtww57YNd8p2NUBqlw2qmqr3XE16qrh6BxL5yMPa4ob1eEdWZ+yPLXdUHTd4o+Q1r0vYueDCjhMbAh3J+ilyQaIHit6cR4FZY1UHDYyQIgsYwwNwrqlGmsAqZzDjqXITasq4DYY18RdiLhcDlTBEK3ND1/HT3tqQbMM4nwVL1UILiSpgng1jeG7+y0Fk2hwEXQ+oV2q6x7Rl70ppa9zC7Gh5mF3nrMcF7ZtQpwqvEPhbnPyupej9e17nmt9aaP0dOMpqmZLXffavD8nuljo4q5G4iyhZnu0MXg9vy3F5nj04+x+kwlb1MpfiKVB16O9bT4mv9HPb7psUjUsEgEb8PE0ts8tsyXoUJUshs8oGRIauahND2GyIPXO4Xh/DIPo24fH0pP14B5SOnrncJwfkpTQc9GaSoMjGMZxylRhPK4bqj68il9qf0fWZd/S0+JwtHaPw6I7Z1v1iN2Qx29LCaPbwvn8HLq2w49qfJLVVjwa01vJKTenvi1+8pDhbu7O3Qnx2hURxK9/dHZ0JMdwpv90/FIHzE58tM/P3zcZgu0NO5qeeZFVprRhoJVrr/mxZ9H0eA31rU0w6bvytl0XlW03E6TrlY/RPL2rGh7IJuXbWZTOTR34/gIqWo+aIM7eXcXyBbooRT5GmhRL4s5jTFwQbO7rnyaZr8+CXr8YxD7oBDCXBjuUTQg/INd00nV+b4HAP
uIi55EMWX2t4qrL7gFmufZbFHdHn17Q987Ki0XeVkTZQw8ViGR0u79bHb/2b15H/ksuRCkGIxTYWKU3D+O2B/uwHKRF25i9jo1efWvRySkefFVFcnIJslXRkcJu+obk8NW/T7zO2iX3nYj7JomhZOyQFnl98HJ0rFNtXtOzWM46AKMNR/id3v2eThhLxnHyuP0MdAaEnyMsA/CO3qtoSHwZPdtEOnHqY8aUmREbro1IL6sLQIzS2qx+97qgfrPm41zOchKcy0e/79jGLeC8ObAf6sDqEpjqA58NEVmH88tfMVayoUqFs1lcntvzbG7bDJ5upHWu7V+8QSH/9Rr7LNWGh/vADjv+Hv4U6OwfFQLe0+z6qwI1BetH89la5hv2aJ+uPrdfzvv+o+1EVPW1im08WtdENYUsc9/bxab6Fh7Hkxyb7Zis4U8H/CaY4WZ2K3/xXRu0wfG+gofnTBoQR/1CgMZ7J1j9cj1z54wPTwT7k09YrJXtbFal8GZLHzUY+trtbjacsv9C9XvN95Zv+ZO8D1/YPlB5xf2xequn7b3mnKnT5UpJ0r/ua22aAjC/bLCryVbRmJnut2ZWMeaJlUxAquzZQBlUZIKaYCGwDAVsvlZF0trJbRT9ORpSnWAhXDyBN75snyIl5OJWeyHv1ebybvqPyogNmknWwtzGMkk1WkMU8TTgJm8pDcOB+OhQo2or2XLJTdFq/GXZKXOi2Qv8JHrbM8iKmRJqpAg7RxxKlBkJDEUpyaGOmQ7qmbsRcPZoIez+bJNXwRns7QpkuXicT9soWvEFl61rFXYYNwutxJSgYhiLRMrOFOUBMwxxbxnkTU9Vtpcq4mZnrLxTwhsLczE3Bdqaz34EFr7ZTi/CGhZhcx10bMGJoG8Eoia19ACmEVSKWqBZQADNCC0ZLeNo08h87Uwa3520cj6mk2yfD9c7dcI+UXgyrKsVZdjDVIZt7uVSSBExLllWDSlAJOQXY5ZN50EAU9WvM91SYJU6u12ITVi91LDQuuY0InbG4Sb6c54N13aa+t5uxvIY0QhmnB6i+D/zZcDmgeMIExMujgsfoBA876+MRiAHrAJiK011GH7QHKqmBAdkeHV57YtmvjXNIRpPIKlh+XNq+p9rZSBHd6s3UwOGjMZQyORimvePDq35d27u7sqyO9JAFh/1sfcUZp89RJZWKPNtnHn60O+vyQYgbsRkYPNvH5j8GaWz0yDrxbwBjJhTy2gLzGmpVbmdoU0Nc9+n6qZheK0ok2f/gBw+Z6diMaNy6RJKfQUSPsKJo4Qp6wiip4SA8fXo/rRXY1BUd3F7knCezeF9NYuDCOfAYorDn/KMpvOJ+bWfsGkqRP9X6o36qspLQH+56jXHqxwl40+VXncZvnJF6Yb7X72OOqiOfLDux/tt9/l88f6wgBVgy33Ggh9QU/4Li+rMpzPpcqXZVa+3Cf5YcetP+/sulonAhb1Ot3eF4eMAd9kCfc2y1bTlm7vuXU5em8AcSRZL2TJALlljHEiAGGCe+Yr+mxRKE6Uz0B9Jdp7yh1vVd+DfRiHH/PFuBtIjVosq8Qrq42655ol7l/XGXthb3f1UZ7eVIlZXS49lGA+juyTap6d38b94qG66NzMojqhAL1zm96oSbTIpvXbuSnSvLD6jvu8vu+HpkVyu5fx3nvr8rLHQu0CLPuB+wHsqdiSTRFW8R5Rm97K0qkV6hYpOrky/jyXzXkxntyaset9IbkpuB8CkL0mJaNTUwb5HINTbchNudmZ2jECBiPqfQ63vzaZQMe2y9TflnXervjR7y1udCLjC/YKuuTQ10p81tf+Cfi7CcxG+FVfh2NnNZFGSxqjTgwaiCRiFCogjTRaqxfr2GE9xw5hQwHja3zT1CyFxd2Iku/LdezkaZppc+vgVa7+HdIdAY7eipPxlA5puWfUTNO/6JkB5xGkRSeUGRS4v6cjuaX1F0f01f+/p6qYDm/q/RtiFfwTkR72jCfuqehqtJGOODnKth5RvfU6xMmq7WVCNO9OqqVcRABoaRIGNILyxYqTQb8+xod6jNeLHKAB4xB44wIF1w88UgEPcapcY99WgCpBMMKAqVgQkvL45QaoZDdVQYJhtyfmUWOOw+9G2EffzeTWFXrWba8cmdNbyiGBEGLk5i93TzfeZO6rQ1tPe0Tzmfusq811dsB55oHy+fPaPjNg2UXltxu69irf4qPLkwDZrFyo1e25tXEzqMDqvf28cBB9LBdmumI44zuDdUbTRO40y/k8LxwvGt6187ZFiVnU6Sp+Z2TnjCtX4eBEf5sit8ddtX92vyzU2NOleVE5HMvKfTpfFxuWa69kswqblmDwvJn3SVv+z01kMZbFuZ01rWYcNmeq6FJTxO3/OK/KZy357D9/LKsbh1Fp7KO7wz/ofDJplhMs3KJ8ycql5bN/Vw/240iq+Z8iddlEGx9gtZgOnqmr0gSLh3xZrlZxup7bODcVtur7fdfAb2aM64O7fqIbtwvBu4//vqk92O60pYX3xFR0LsZ3ofA9XnVX/TFKvYc1TqPPtu6kzq0/PfiHt81d//bmwxpJM0eMJCv/tP/9as/1L/fJYr3rvj5k1bNV5yyWs82QG/WAkzJvLW8V0tvxuNPYJEnlHPdunHc+QJhvCzc6duuPxqx1J16we9v+8PPHfzhkW8n+9sd91qkXxx3909vb25qIa168ItFq1359MIXZwiiPF5g4PDgHRRh1q+/JZp48NB4qHHf6Kd1fTfz7JO9N1USvtcaLVF5Quxu1/aBtPRhBI6hRarWbRGGGA3bkFl19Hq7jYJ1W/sgDsH6570VltW/BnFXm7vMk7mIO71fadT2Yw1X1l8VBIpKOxeo8JzhmiloBDGHIdHciUA9z1OM0Ax6PaQPWlwa5WZn04La9Jda1wk1aA9C+NBRQomJXEKZp7ZnTVEQ8YVAZppUOWWwoZDffEiI0lKHQ1xsuzJjTYdOtEd4yR8f5+IevjLOZvWSt5dzUceuNi8J7pUwQN86UTt8ST5ncAVPJTr/hslL/njzoeX/boVe57axmQWq/pN2BncR5iOOIGGZATBJqaLxx261Bu3vfNZDyNcLxAAofp4jJ28ouxMz697+8a3d17GZxXaIt8hQM21wgswd5G7Y61BMDNcI9PbsorZmdF/1JQa+z1YlVCWXFLJBGmoCOSkhphLggAsSKpyagjIZiULTtkdHUE1gg+4uj06mEG9uo6gdVLMpVR9Xlouqj2gXfjlYTx1YPMCV9eow0C5tq+yujB95vcunLqHWWVQ09jlMUa1CpBWnjcKAq4gYQZRUEofHmnd6oyCO2ejumdXLg/LPbLLnlP/z3x0+jHZWf83yy0XN8LmRu95ZdhAYC8W4NZA8kfesi5vGmzel2dPsMY9OdkT+puV7Oy8QnM/ZzKrwMHsVI1UIGYU0I0t3UUBhRHlMqjFQoBgEdVLznFPVqI77+REeaYducdp/8muvRR6vWewIRJiTsJgdDEakUwDhhXMcKhUMA6uujAgzZi2+IMd/fQzmk9gHZVFdEbVRRmzIUd4IggqY0wlDzlAMD7
MYP2NdHsD61fVG2IbUbn9mzqM32p/ZLYd1Vz0KUxNTwDuvG3EQxSgxXSRwLHrJnYa+XO+Ry2MxdeHy9B0xbG5JyxPDYl0rKalgwkhomRLRJiTiNpCSaC6phnKYBuyL1pTCXQxc196S48gAprmREBd618mBZ9WO+i4kG9rVuN7vlFEYxY6lKCGfWPAzoAeK9KI3wUFt6VC4eoPNJ08T9NVLb7u2Vvy9lIGWdJo2JZdNKxQJb5UsKEjDtQDQVrmtqNx0Z29T27G22v/k2TBQ9oOrueqhNK3sKQxhL0qE2sZycAckMUDEHKGDfxEGHAR+1IfRI5QOCjUNyjyt2u1ZyV8YTRoamsJ3fIay9FEGpDUepxCgOyMrJQJv27G1PIPCAidBDYqPXTGyySuZJDNVxR+EmMI2U5Z6xEvYzKgPKbdKT2z4tzeeICyG36QEFb9dDbVoF9bGkypi2X0TGII4ETw1SmgoIdThqM0Z2U9unpR2QjTqk9qv2gtGqtAwrSUnSrWm0Onkaa2NZfSoTEtAC42xggQ0novoaHvMAfhFvm5/XQ222orZJqep4wRxjpwQbEHMsrUgPqJP397bEHi3N5wY7YN7DcZtZ1weqCWa/5p9XJ32qE2stw9WESWjlNsXU5U6CrjAgETCY0tQQFpOQaj3utlKxJvvQSQ6bZN8OYNDegPGESc6QOv5Y/jXpoold5UwQWrkEsE5VIjruPoVxBLCVMiDGUKuAqoXojW+EiHsSCH0tsOX+7GdM0G1kk5RrFUBihQBNNOvwE4v4yCQsoSnAMUxJyHlDPQEk5DAMw3zKZYCg28i+KddK7SpSg2MZG92pAqE6iSBPlcQJ5ipkFYhAXReg9ARqmmBOxwMYIFAzplGKZwpmp11Ar43AsHOAy3e5U9Ns4h7wFzP5YhyhNyTGdBqxuT+tvJt1+4Z9ehF05mvWnQ/a8zXdr7rzNZ9GXdKb1qjLbdRuD7VsNlB7hmVNqgtpdyBozw0JUG/O5NgeB7I3mU/y4wys7N+xrI2iA4dJDvfBCIt62EBjv21wYAOName0ZsvSm/Zk2RZcYRust5z2sd9s3v5w2VGgph5Mo4sCNe/71iE8ENQC9rcHG4fqQ4A3IsXlRANlVHuUjMVZhWS3JFDeUjfPuD3wZZYXjqZvZ/lCP9S30OvUk1AjEuIDvkAxZq4Ww9PDR/IEcB5GsmLUddswPFSkA5VqDAl7vdksrHK3EgpBNzFJShBHLNZAGmoUIwGd62u36dol4wul+LJZArQWZNeazSIxdIX2HMTGhbbb+Q2WqhHSwGjCtOYiYEZhP0wigafeEnp8qY2MfpYv9VVnFLLKNUYQ5xJ3R+ciFCUxMATSRCYgoCnbH+dlTVlPn7RjmbKvOHfJUrsyZYlQadrNVKNGRzC2HNRggggL6DnHjd9zG7V9SYcyBJt+1blLbJXNknIYi47jAhEQpYRgwoGSSgUUypQPqO0Ryh7PRYi97e1p93qoXSWzECufMWXdSgAYpQIxQLHUKUpCxkB7mWqceNzSvgSHEJ1W+av2QTO8Etwp17DNymWawkhxlkJt0gQjGDCmRegIcmPP7sY8ALnRqyY3WxlYMQdc9UMOjEqSxDIVMQyolbO+S5A3Nc2doJOn7jxEvQAf4X67YnLTmtwg1Z1kNUmp/QcQTBExImS3M057arnPCpM+IywEta/VNWJJ5VbBCjwkUuUYtYaNPR0nUQLsy8QkEomAchmCvm9EoJGc+oCWEbsGaY7tKAOFr6PMqgtuc65lczhpVZovPU01h51nnps389TnOFrdUxK5UQuZNr3eKHx7b5QXU3S+Pwfpx6hBo6K1Tb0wg6HHJCnwgyaFnSWCQxDsxHBuoYRjgzXVrz6aIrNr5tz8e4QlNWMiIYgRqqgGrgv/6kvtmE4TvruQmI7AXWehbIi6d0iH97RZ0B8nFC6kw1+xH+IsFXMc9ON1ZKjMUE8NFQmgzIgD/BAvQpk5S5xnoJcCPJQq1OMuPqB33pCUB/gYXgQpzxLnGViUoFnOHXlNISakNLm2L0AbGORzhEvaGCXyGwZ2KSK/N5ZSyp6+OD6Lo89KGlCEF/livzS6c8KNnxtvjXp0IXjr2zEH442LvkHUVy8C4u3lpKudHW+NVL8UvOG+WAQHpqlxgntnokdLUxM+l13H46PXQHly6mDDEpCYoZ/nabxJosy0M0Pp8rsI+1KVf2Lur7uffKFq/U5uNc33cPDBpgCjmdTMhkF1CMO4Wnb59zxtIe1izUa5/IjP5TeY0bOehOKdz9McW110z/E6Ye64NWML2Ju9qYZNpXkxVbPOWCHfLe6eCRTCe/n5wUSVFaL0xJRRnkYqsncarabFRnZ1o7SaAtX2ZIrtnsyvD9nCfLL7wr3/Wqh5dx/2cj/TNEVa+6RIwmJX0xkmAaHnHoLr4QWdgQaenXFA95QxTkgxwgl5omzeP5bT+a+zN+2cXkhvrfJVJ/Uitkrqrb7/i1HuypT2OB278Wf9XlymLxkkf/tiID4eSUN0X+gPXDykjdKFj4FNZvA2qwYfzhJT1DyoZjvuuPM6GHt0aqzyEqlZEsXLMpuZsozcx5b3WIlyB12t051zXBx77iskvm2UTrL5L7tUzD18HB3QkaGrCkoP5GDfxDjIw7EHxPSymDy+LSwyTCVht7PyCjxrO2E0gdYY8xDIUgfdyTEEanMbqwhX3CYAoURPV17nJLUHLXFPvPsAFWpAKnmAi/haAgKiSgC+ExTSODGddh2CqUgTljCgJINJQC+y7DdeIsOKSuiLTfbrfJ6hMFuDdpasZqi25qlu0+4m+SzJZ7fTx6hW6G6zvPqkraWtvBUbtbSrhZF0mt2djo0gxFT5raaGkRW9ERdSJpAgDVXT1qHPaUIoGV0zDAnf9CxPhhTsW/ihuzzsadWwUXbYGrW3f5QHGGMes2t0bshOs+vJf7DqB7/Rf7CnYfi8O1zNtp49PtmFDv/73Vpnr9PnWGRDwV398Qhu6v6OEc7h9xBpphC09tC6CWkny/A4/eTliFS0F1p+eRoTfD3GvvFNeeryjjwNSy9LJ8tacrbcLmjHulHeVSeO3vdOu8bOKx2LJd1AuTsj4wSKmLREsTRYRxxRTFAc60Sr44liyLv2/h6y+DhD9uTleH5eJiNhfVt62GDvWKXacoQp/Z1223ajpGcj3rom4Tv1DqRev0sCBcPw0pFF+Kf/97M9EKvk3uVXg7So5Lk7W6YDCfOfzcwUauEyut/W10EdGKFXKs1X1SdS4hhUqWHN6DuR0DjCxigIeRwndJNh3Y2V4kBmQq+jIqEe+c488h0FmYLp4TIjMny+c5mtbR+65aNnMBTCMBI1v62cDrflQ5+FvFLfHGNVk0wSgzSBnXpFA2HEiIk1xZwxHXBSlQC95Jc1UHaUn9Agkd8zdFw1U6204yM90LFXCjoOVqCDiqWd7PQYgwhLCIkEnCIcsM9v1Q22AzrPOETm8V4dMB5tD9DZRWtrSxYQ+Jtrr5g5f+RjviwcJHSRzReBdCkrXkz2xalSTX/p+uKR09wiK+0mk6hzxSe47tdz
+orgiiq4JsgkqtONBQsaMZVwERuScK23q1n2LRooXaG8Khh2ZTQBnk6kYYqCRyld8FU3feCrpg8JSXuNhylEEU1iwLHhyMiAcw7WP3maczBMo2kyZjru+XCB0zAsykxVNoms1pullpIOA31OBLdHV15Yes6BEFt1HkjtFk46MjQxLGKWG1k5qpRal6s/pYaAvpFzP7FX6Rs8m7PL7Y1lujnPPpnmLdulTmvfJyFoU9ruMUJdvJka0fiofdzUN/0pSKjr9BpqoeI4W0z/6u+zVxomYXxVPplCwTttPSTC0r5VQCCqlEQhB46hXkLsOveok5Pm0VCbmsqLBF3N0+fL8iF3Ccxb2fprVTBBxcsBB9gk3RnDMokQ4ymD2EAacjw86wV/YTN0ruOl88zJOaAj2CiFER2QaXeVgp1UYECcqjjtdANMkKuSYioWhKQ8jo+dhou8Ps8A2JNd/w/3BIB9/uEDiotPL0nv89/Nt/kXVfR4W2dJXgtv44wK9+B3EhKUxKmKiBS6aSohUARFQplmiZAk5Px02asoQGgYFoO+VgT0qM6ewKib5YmpMJcW+Sxb9qUp2i8qdupuCA5CUfVRuRFUdtdQCZxX7M7VhAOOQES1MIhDETHedOgSwL7lVHLGjdScBRzwQlEPSpgM4+Ow6bfXaVDykkyBNZTKRWbsRczECpVZH1D4ygBFJaU4kjGkVCJiARUnjR/aqmGxs7FTBjTnIYMfsueHpmQYTIPY46nDQQDlXMB5vmh9/WcXofxgAeB+9H8=</diagram></mxfile> \ No newline at end of file
diff --git a/installer/functions.sh b/installer/functions.sh
index c847ba1cd..cfc2d7eaa 100644
--- a/installer/functions.sh
+++ b/installer/functions.sh
@@ -418,7 +418,7 @@ install_non_systemd_init() {
if [ -d /etc/init.d -a ! -f /etc/init.d/netdata ]
then
- if [ "${key}" = "gentoo" ]
+ if [[ "${key}" =~ ^(gentoo|alpine).* ]]
then
echo >&2 "Installing OpenRC init file..."
run cp system/netdata-openrc /etc/init.d/netdata && \
@@ -437,7 +437,7 @@ install_non_systemd_init() {
run update-rc.d netdata defaults && \
run update-rc.d netdata enable && \
return 0
- elif [[ "${key}" =~ ^(amzn-201[567]|CentOS release 6|Red Hat Enterprise Linux Server release 6).* ]]
+ elif [[ "${key}" =~ ^(amzn-201[567]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6).* ]]
then
echo >&2 "Installing init.d file..."
run cp system/netdata-init-d /etc/init.d/netdata && \
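The two hunks above widen the init detection from an exact string comparison to bash's `[[ ... =~ ]]` regex match, so the OpenRC init file now also covers Alpine and the init.d path covers Oracle Linux (`ol`). A minimal sketch of the pattern, using hypothetical distro keys:

```sh
#!/usr/bin/env bash
# hypothetical keys, to illustrate the regex used by install_non_systemd_init()
for key in gentoo alpine-3.6 ol-7.4 ubuntu; do
    if [[ "${key}" =~ ^(gentoo|alpine).* ]]; then
        echo "${key}: install the OpenRC init file"
    else
        echo "${key}: fall through to the next distro check"
    fi
done
```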
diff --git a/makeself/Makefile.am b/makeself/Makefile.am
new file mode 100644
index 000000000..923f3cefb
--- /dev/null
+++ b/makeself/Makefile.am
@@ -0,0 +1,25 @@
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ makeself.sh \
+ makeself-license.txt \
+ install-alpine-packages.sh \
+ post-installer.sh \
+ jobs/10-prepare-destination.install.sh \
+ jobs/50-curl-7.53.1.install.sh \
+ jobs/50-bash-4.4.install.sh \
+ jobs/50-fping-4.0.install.sh \
+ jobs/70-netdata-git.install.sh \
+ jobs/99-makeself.install.sh \
+ run-all-jobs.sh \
+ install-or-update.sh \
+ build-x86_64-static.sh \
+ makeself-header.sh \
+ makeself-help-header.txt \
+ makeself.lsm \
+ functions.sh \
+ $(NULL)
diff --git a/makeself/Makefile.in b/makeself/Makefile.in
new file mode 100644
index 000000000..9c84cc042
--- /dev/null
+++ b/makeself/Makefile.in
@@ -0,0 +1,479 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = makeself
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
+ $(top_srcdir)/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
+ $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ makeself.sh \
+ makeself-license.txt \
+ install-alpine-packages.sh \
+ post-installer.sh \
+ jobs/10-prepare-destination.install.sh \
+ jobs/50-curl-7.53.1.install.sh \
+ jobs/50-bash-4.4.install.sh \
+ jobs/50-fping-4.0.install.sh \
+ jobs/70-netdata-git.install.sh \
+ jobs/99-makeself.install.sh \
+ run-all-jobs.sh \
+ install-or-update.sh \
+ build-x86_64-static.sh \
+ makeself-header.sh \
+ makeself-help-header.txt \
+ makeself.lsm \
+ functions.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu makeself/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu makeself/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/makeself/build-x86_64-static.sh b/makeself/build-x86_64-static.sh
index 0516beae2..8c84039f3 100755
--- a/makeself/build-x86_64-static.sh
+++ b/makeself/build-x86_64-static.sh
@@ -1,4 +1,6 @@
-#!/usr/bin/env sh
+#!/usr/bin/env bash
+
+. $(dirname "$0")/../installer/functions.sh || exit 1
set -e
@@ -16,22 +18,22 @@ then
#
# This command maps the current directory to
# /usr/src/netdata.git
- # inside the container and runs the script setup-x86_64-static.sh
+ # inside the container and runs the script install-alpine-packages.sh
# (also inside the container)
#
- sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.5 \
- /bin/sh /usr/src/netdata.git/makeself/setup-x86_64-static.sh
+ run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.6 \
+ /bin/sh /usr/src/netdata.git/makeself/install-alpine-packages.sh
# save the changes made permanently
id=$(sudo docker ps -l -q)
- sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}"
+ run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}"
fi
# Run the build script inside the container
-sudo docker run -a stdin -a stdout -a stderr -i -t -v \
+run sudo docker run -a stdin -a stdout -a stderr -i -t -v \
$(pwd):/usr/src/netdata.git:rw \
"${DOCKER_CONTAINER_NAME}" \
- /bin/sh /usr/src/netdata.git/makeself/build.sh
+ /bin/sh /usr/src/netdata.git/makeself/build.sh "${@}"
if [ "${USER}" ]
then
diff --git a/makeself/build.sh b/makeself/build.sh
index 7896425d7..afa4f545e 100755
--- a/makeself/build.sh
+++ b/makeself/build.sh
@@ -1,6 +1,28 @@
#!/usr/bin/env sh
-# First run setup-x86_64-static.sh under alpine linux to install
+# -----------------------------------------------------------------------------
+# parse command line arguments
+
+export NETDATA_BUILD_WITH_DEBUG=0
+
+while [ ! -z "${1}" ]
+do
+ case "${1}" in
+ debug)
+ export NETDATA_BUILD_WITH_DEBUG=1
+ ;;
+
+ *)
+ ;;
+ esac
+
+ shift
+done
+
+
+# -----------------------------------------------------------------------------
+
+# First run install-alpine-packages.sh under alpine linux to install
# the required packages. build-x86_64-static.sh will do this for you
# using docker.
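The loop above scans the remaining command-line arguments: any literal `debug` sets `NETDATA_BUILD_WITH_DEBUG=1`, and everything else is silently ignored. Since `build-x86_64-static.sh` now forwards `"${@}"` into the container, a debug static build can be requested directly from the host:

```sh
# build an unstripped, debug-instrumented static package
./makeself/build-x86_64-static.sh debug
```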
diff --git a/makeself/functions.sh b/makeself/functions.sh
index 48835f0f5..a72a1f411 100755
--- a/makeself/functions.sh
+++ b/makeself/functions.sh
@@ -3,11 +3,12 @@
# -----------------------------------------------------------------------------
# allow running the jobs by hand
+[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0
[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/.."
[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.."
-[ -z "${PROCESSORS}" ] && export PROCESSORS=$(cat /proc/cpuinfo 2>/dev/null | grep ^processor | wc -l)
+[ -z "${PROCESSORS}" ] && export PROCESSORS=$(grep -c ^processor /proc/cpuinfo)
[ -z "${PROCESSORS}" -o $((PROCESSORS)) -lt 1 ] && export PROCESSORS=1
export NULL=
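`grep -c ^processor /proc/cpuinfo` counts the matching lines in a single process, replacing the old `cat | grep | wc -l` pipeline; the next line still falls back to `1` when the count is empty or zero. A self-contained sketch of the fallback (the `2>/dev/null` is a defensive addition for systems without `/proc`; the new line itself does not include it):

```sh
PROCESSORS=$(grep -c ^processor /proc/cpuinfo 2>/dev/null)
[ -z "${PROCESSORS}" -o $((PROCESSORS)) -lt 1 ] && PROCESSORS=1
echo "building with -j${PROCESSORS}"
```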
diff --git a/makeself/setup-x86_64-static.sh b/makeself/install-alpine-packages.sh
index 87cd29669..87cd29669 100755
--- a/makeself/setup-x86_64-static.sh
+++ b/makeself/install-alpine-packages.sh
diff --git a/makeself/install-or-update.sh b/makeself/install-or-update.sh
index da63c64b6..b64e7be33 100755
--- a/makeself/install-or-update.sh
+++ b/makeself/install-or-update.sh
@@ -138,7 +138,7 @@ run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata
# -----------------------------------------------------------------------------
progress "fix plugin permissions"
-for x in apps.plugin freeipmi.plugin
+for x in apps.plugin freeipmi.plugin cgroup-network
do
f="usr/libexec/netdata/plugins.d/${x}"
@@ -149,6 +149,12 @@ do
fi
done
+# fix the fping binary
+if [ -f bin/fping ]
+then
+ run chown root:${NETDATA_GROUP} bin/fping
+ run chmod 4750 bin/fping
+fi
# -----------------------------------------------------------------------------
progress "starting netdata"
diff --git a/makeself/jobs/50-bash-4.4.install.sh b/makeself/jobs/50-bash-4.4.install.sh
index 07c84b6d7..8019cefb7 100755
--- a/makeself/jobs/50-bash-4.4.install.sh
+++ b/makeself/jobs/50-bash-4.4.install.sh
@@ -44,4 +44,7 @@ EOF
run make install
-run strip ${NETDATA_INSTALL_PATH}/bin/bash
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/bash
+fi
diff --git a/makeself/jobs/50-curl-7.53.1.install.sh b/makeself/jobs/50-curl-7.53.1.install.sh
index 0e375a918..038fb2ac9 100755
--- a/makeself/jobs/50-curl-7.53.1.install.sh
+++ b/makeself/jobs/50-curl-7.53.1.install.sh
@@ -27,4 +27,7 @@ run make clean
run make -j${PROCESSORS}
run make install
-run strip ${NETDATA_INSTALL_PATH}/bin/curl
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/curl
+fi
diff --git a/makeself/jobs/50-fping-4.0.install.sh b/makeself/jobs/50-fping-4.0.install.sh
index dbc91c51d..ce6cb270e 100755
--- a/makeself/jobs/50-fping-4.0.install.sh
+++ b/makeself/jobs/50-fping-4.0.install.sh
@@ -22,4 +22,7 @@ run make clean
run make -j${PROCESSORS}
run make install
-run strip ${NETDATA_INSTALL_PATH}/bin/fping
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/fping
+fi
diff --git a/makeself/jobs/70-netdata-git.install.sh b/makeself/jobs/70-netdata-git.install.sh
index 873830f9f..0486ce11a 100755
--- a/makeself/jobs/70-netdata-git.install.sh
+++ b/makeself/jobs/70-netdata-git.install.sh
@@ -4,13 +4,20 @@
cd "${NETDATA_SOURCE_PATH}" || exit 1
-export CFLAGS="-O3 -static"
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ export CFLAGS="-static -O3"
+else
+ export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1"
+fi
run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \
--dont-wait \
--dont-start-it \
${NULL}
-run strip ${NETDATA_INSTALL_PATH}/bin/netdata
-run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin
-
+if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
+then
+ run strip ${NETDATA_INSTALL_PATH}/bin/netdata
+ run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin
+fi
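The same `NETDATA_BUILD_WITH_DEBUG` switch now controls both compilation and stripping: release builds use `-static -O3` and strip the binaries, while debug builds compile with `-O1 -ggdb`, stack protection, fortified sources, and internal checks, and keep the symbols so the static binary is debuggable. A quick way to confirm which kind of build was produced (install path is the hypothetical default):

```sh
file /opt/netdata/bin/netdata   # release: "... stripped"; debug: "... not stripped"
```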
diff --git a/makeself/run-all-jobs.sh b/makeself/run-all-jobs.sh
index b08fa9187..2ad594380 100755
--- a/makeself/run-all-jobs.sh
+++ b/makeself/run-all-jobs.sh
@@ -22,11 +22,6 @@ fi
# netdata source directory
export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.."
-# number of processors this system has
-PROCESSORS=$(cat /proc/cpuinfo 2>/dev/null | grep ^processor | wc -l)
-[ -z "${PROCESSORS}" -o $(( PROCESSORS )) -lt 1 ] && PROCESSORS=1
-export PROCESSORS
-
# make sure ${NULL} is empty
export NULL=
@@ -43,4 +38,4 @@ do
done
echo >&2 "All jobs for static packaging done successfully."
-exit 0 \ No newline at end of file
+exit 0
diff --git a/netdata-installer.sh b/netdata-installer.sh
index 35cb850fb..f3f444f66 100755
--- a/netdata-installer.sh
+++ b/netdata-installer.sh
@@ -49,7 +49,7 @@ umask 002
# Be nice on production environments
renice 19 $$ >/dev/null 2>/dev/null
-processors=$(cat /proc/cpuinfo 2>/dev/null | grep ^processor | wc -l)
+processors=$(grep -c ^processor /proc/cpuinfo)
[ $(( processors )) -lt 1 ] && processors=1
# you can set CFLAGS before running installer
@@ -392,8 +392,10 @@ if [ ${DONOTWAIT} -eq 0 ]
if [ ! -z "${NETDATA_PREFIX}" ]
then
eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to \'\001${TPUT_CYAN}\002${NETDATA_PREFIX}\001${TPUT_YELLOW}\002\'\001${TPUT_RESET}\002 > ' -e -r REPLY"
+ [ $? -ne 0 ] && exit 1
else
eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to your system\001${TPUT_RESET}\002 > ' -e -r REPLY"
+ [ $? -ne 0 ] && exit 1
fi
fi
@@ -405,7 +407,7 @@ build_error() {
Sorry! netdata failed to build...
-You many need to check these:
+You may need to check these:
1. The package uuid-dev (or libuuid-devel) has to be installed.
@@ -782,6 +784,12 @@ if [ ${UID} -eq 0 ]
run chmod 4755 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin"
fi
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" ]
+ then
+ run chown root "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network"
+ run chmod 4755 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network"
+ fi
+
else
run chown "${NETDATA_USER}:${NETDATA_USER}" "${NETDATA_LOG_DIR}"
run chown -R "${NETDATA_USER}:${NETDATA_USER}" "${NETDATA_PREFIX}/usr/libexec/netdata"
@@ -1046,6 +1054,18 @@ if [ -f /etc/init.d/netdata ]
rm -i /etc/init.d/netdata
fi
+if [ -f /etc/periodic/daily/netdata-updater ]
+ then
+ echo "Deleting /etc/periodic/daily/netdata-updater ..."
+ rm -i /etc/periodic/daily/netdata-updater
+fi
+
+if [ -f /etc/cron.daily/netdata-updater ]
+ then
+ echo "Deleting /etc/cron.daily/netdata-updater ..."
+ rm -i /etc/cron.daily/netdata-updater
+fi
+
getent passwd netdata > /dev/null
if [ $? -eq 0 ]
then
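The two added `[ $? -ne 0 ] && exit 1` lines make the interactive prompt abort when `read` fails, which happens on EOF (for example when the installer runs with stdin closed, or the user presses Ctrl-D), instead of silently continuing with the build. The failure mode is easy to reproduce:

```sh
# read returns non-zero on EOF; redirecting from /dev/null triggers it
read -r REPLY < /dev/null
[ $? -ne 0 ] && echo "read failed, installer would exit 1 here"
```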
diff --git a/netdata.spec b/netdata.spec
index 3753e27ad..bb8171359 100644
--- a/netdata.spec
+++ b/netdata.spec
@@ -77,11 +77,11 @@ Recommends: python2-psycopg2 \
Summary: Real-time performance monitoring, done right
Name: netdata
-Version: 1.7.0
+Version: 1.8.0
Release: 1%{?dist}
License: GPLv3+
Group: Applications/System
-Source0: https://github.com/firehol/%{name}/releases/download/v1.7.0/%{name}-1.7.0.tar.xz
+Source0: https://github.com/firehol/%{name}/releases/download/v1.8.0/%{name}-1.8.0.tar.xz
URL: http://my-netdata.io
BuildRequires: pkgconfig
BuildRequires: xz
@@ -117,7 +117,7 @@ so that you can get insights of what is happening now and what just
happened, on your systems and applications.
%prep
-%setup -q -n netdata-1.7.0
+%setup -q -n netdata-1.8.0
%build
%configure \
@@ -207,6 +207,9 @@ rm -rf "${RPM_BUILD_ROOT}"
%{_datadir}/%{name}/web
%changelog
+* Mon Sep 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.8.0-1
+ This is mainly a bugfix release.
+ Please check the full changelog on GitHub.
* Mon Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1
- netdata is now a fully featured statsd server
- new installation options
diff --git a/netdata.spec.in b/netdata.spec.in
index 6ba791232..eef2a0896 100644
--- a/netdata.spec.in
+++ b/netdata.spec.in
@@ -207,6 +207,9 @@ rm -rf "${RPM_BUILD_ROOT}"
%{_datadir}/%{name}/web
%changelog
+* Mon Sep 17 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.8.0-1
+ This is mainly a bugfix release.
+ Please check the full changelog on GitHub.
* Mon Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1
- netdata is now a fully featured statsd server
- new installation options
diff --git a/node.d/Makefile.am b/node.d/Makefile.am
index 28008aeb7..7f67faa6a 100644
--- a/node.d/Makefile.am
+++ b/node.d/Makefile.am
@@ -6,6 +6,7 @@ dist_node_DATA = \
fronius.node.js \
sma_webbox.node.js \
snmp.node.js \
+ stiebeleltron.node.js \
$(NULL)
nodemodulesdir=$(nodedir)/node_modules
@@ -14,14 +15,14 @@ dist_nodemodules_DATA = \
node_modules/extend.js \
node_modules/pixl-xml.js \
node_modules/net-snmp.js \
- node_modules/asn1.js \
+ node_modules/asn1-ber.js \
$(NULL)
-nodemodulesberdir=$(nodedir)/node_modules/ber
-dist_nodemodulesber_DATA = \
- node_modules/ber/index.js \
- node_modules/ber/errors.js \
- node_modules/ber/reader.js \
- node_modules/ber/types.js \
- node_modules/ber/writer.js \
+nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
$(NULL)
diff --git a/node.d/Makefile.in b/node.d/Makefile.in
index 35024cb12..dd572ee8b 100644
--- a/node.d/Makefile.in
+++ b/node.d/Makefile.in
@@ -81,7 +81,7 @@ host_triplet = @host@
subdir = node.d
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(dist_node_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemodulesber_DATA)
+ $(dist_nodemoduleslibber_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -144,9 +144,9 @@ am__uninstall_files_from_dir = { \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" \
- "$(DESTDIR)$(nodemodulesberdir)"
+ "$(DESTDIR)$(nodemoduleslibberdir)"
DATA = $(dist_node_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemodulesber_DATA)
+ $(dist_nodemoduleslibber_DATA)
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
@@ -300,6 +300,7 @@ dist_node_DATA = \
fronius.node.js \
sma_webbox.node.js \
snmp.node.js \
+ stiebeleltron.node.js \
$(NULL)
nodemodulesdir = $(nodedir)/node_modules
@@ -308,16 +309,16 @@ dist_nodemodules_DATA = \
node_modules/extend.js \
node_modules/pixl-xml.js \
node_modules/net-snmp.js \
- node_modules/asn1.js \
+ node_modules/asn1-ber.js \
$(NULL)
-nodemodulesberdir = $(nodedir)/node_modules/ber
-dist_nodemodulesber_DATA = \
- node_modules/ber/index.js \
- node_modules/ber/errors.js \
- node_modules/ber/reader.js \
- node_modules/ber/types.js \
- node_modules/ber/writer.js \
+nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
$(NULL)
all: all-am
@@ -395,27 +396,27 @@ uninstall-dist_nodemodulesDATA:
@list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodemodulesberDATA: $(dist_nodemodulesber_DATA)
+install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA)
@$(NORMAL_INSTALL)
- @list='$(dist_nodemodulesber_DATA)'; test -n "$(nodemodulesberdir)" || list=; \
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesberdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodemodulesberdir)" || exit 1; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesberdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesberdir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \
done
-uninstall-dist_nodemodulesberDATA:
+uninstall-dist_nodemoduleslibberDATA:
@$(NORMAL_UNINSTALL)
- @list='$(dist_nodemodulesber_DATA)'; test -n "$(nodemodulesberdir)" || list=; \
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodemodulesberdir)'; $(am__uninstall_files_from_dir)
+ dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir)
tags TAGS:
ctags CTAGS:
@@ -457,7 +458,7 @@ check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
- for dir in "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemodulesberdir)"; do \
+ for dir in "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -512,7 +513,7 @@ info: info-am
info-am:
install-data-am: install-dist_nodeDATA install-dist_nodemodulesDATA \
- install-dist_nodemodulesberDATA
+ install-dist_nodemoduleslibberDATA
install-dvi: install-dvi-am
@@ -557,7 +558,7 @@ ps: ps-am
ps-am:
uninstall-am: uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemodulesberDATA
+ uninstall-dist_nodemoduleslibberDATA
.MAKE: install-am install-strip
@@ -565,16 +566,16 @@ uninstall-am: uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \
ctags-am distclean distclean-generic distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dist_nodeDATA \
- install-dist_nodemodulesDATA install-dist_nodemodulesberDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_nodeDATA \
- uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemodulesberDATA
+ install-dist_nodemodulesDATA \
+ install-dist_nodemoduleslibberDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \
+ uninstall-dist_nodemoduleslibberDATA
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/node.d/README.md b/node.d/README.md
index 3c2977905..6818d34de 100644
--- a/node.d/README.md
+++ b/node.d/README.md
@@ -61,3 +61,58 @@ Sample:
If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `5`.
---
+
+# stiebel eltron
+
+This module collects metrics from the configured heat pump and hot water installation via the Stiebel Eltron ISG web interface.
+See `netdata/conf.d/node.d/stiebeleltron.conf.md` for more details.
+
+**Requirements**
+ * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+ * Stiebel Eltron ISG web with network access (http), without password login
+
+The charts are configurable; however, the provided default configuration collects the following:
+
+1. **General**
+ * Outside temperature in C
+ * Condenser temperature in C
+ * Heating circuit pressure in bar
+ * Flow rate in l/min
+ * Output of water and heat pumps in %
+
+2. **Heating**
+ * Heat circuit 1 temperature in C (set/actual)
+ * Heat circuit 2 temperature in C (set/actual)
+ * Flow temperature in C (set/actual)
+ * Buffer temperature in C (set/actual)
+ * Pre-flow temperature in C
+
+3. **Hot Water**
+ * Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
+ * Heat circuit 1 room temperature in C (set/actual)
+ * Heat circuit 2 room temperature in C (set/actual)
+
+5. **Electric Reheating**
+ * Dual Mode Reheating temperature in C (hot water/heating)
+
+6. **Process Data**
+ * Remaining compressor rest time in s
+
+7. **Runtime**
+ * Compressor runtime hours (hot water/heating)
+ * Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+ * Compressor today in kWh (hot water/heating)
+ * Compressor Total in kWh (hot water/heating)
+
+
+### configuration
+
+The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/firehol/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and the hostnames. **You may have to adapt the configuration to suit your own setup.**
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `10`.
+
+---
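Since the module scrapes the ISG's unauthenticated HTTP interface, a quick reachability check from the netdata host is a sensible first debugging step; the hostname below is a placeholder for your unit's address:

```sh
# expect an HTTP 200 from the ISG web UI; isg.local is hypothetical
curl -fsS -o /dev/null -w '%{http_code}\n' http://isg.local/
```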
diff --git a/node.d/fronius.node.js b/node.d/fronius.node.js
index f771f6c3d..7aa2c13b7 100644
--- a/node.d/fronius.node.js
+++ b/node.d/fronius.node.js
@@ -1,15 +1,15 @@
-'use strict';
+"use strict";
// This program will connect to one or more Fronius Symo Inverters.
// to get the Solar Power Generated (current, today).
// example configuration in netdata/conf.d/node.d/fronius.conf.md
-var url = require('url');
-var http = require('http');
-var netdata = require('netdata');
+var url = require("url");
+var http = require("http");
+var netdata = require("netdata");
-netdata.debug('loaded ' + __filename + ' plugin');
+netdata.debug("loaded " + __filename + " plugin");
var fronius = {
name: "Fronius",
@@ -39,24 +39,25 @@ var fronius = {
},
// Gets the site power chart. Will be created if not existing.
- getSitePowerChart: function (service, id) {
+ getSitePowerChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
var chart = fronius.charts[id];
if (fronius.isDefined(chart)) return chart;
var dim = {};
- dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "Grid", 1);
- dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "Photovoltaics", 1);
- dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "Accumulator", 1);
+ dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "grid", 1);
+ dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "photovoltaics", 1);
+ dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "accumulator", 1);
chart = {
id: id, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Current Site Power', // the title of the chart
- units: 'W', // the units of the chart dimensions
- family: 'power', // the family of the chart
- context: 'fronius.power', // the context of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Site Power", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "power", // the family of the chart
+ context: "fronius.power", // the context of the chart
type: netdata.chartTypes.area, // the type of the chart
- priority: fronius.base_priority + 1, // the priority relative to others in the same family
+ priority: fronius.base_priority + 1, // the priority relative to others in the same family
update_every: service.update_every, // the expected update frequency of the chart
dimensions: dim
};
@@ -67,19 +68,20 @@ var fronius = {
},
// Gets the site consumption chart. Will be created if not existing.
- getSiteConsumptionChart: function (service, id) {
+ getSiteConsumptionChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
var chart = fronius.charts[id];
if (fronius.isDefined(chart)) return chart;
var dim = {};
- dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "Load", 1);
+ dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "load", 1);
chart = {
id: id, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Current Load', // the title of the chart
- units: 'W', // the units of the chart dimensions
- family: 'consumption', // the family of the chart
- context: 'fronius.consumption', // the context of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Load", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "consumption", // the family of the chart
+ context: "fronius.consumption", // the context of the chart
type: netdata.chartTypes.area, // the type of the chart
priority: fronius.base_priority + 2, // the priority relative to others in the same family
update_every: service.update_every, // the expected update frequency of the chart
@@ -92,20 +94,21 @@ var fronius = {
},
// Gets the site autonomy chart. Will be created if not existing.
- getSiteAutonomyChart: function (service, id) {
+ getSiteAutonomyChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
var chart = fronius.charts[id];
if (fronius.isDefined(chart)) return chart;
var dim = {};
- dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "Autonomy", 1);
- dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "Self Consumption", 1);
+ dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "autonomy", 1);
+ dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "self_consumption", 1);
chart = {
id: id, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Current Autonomy', // the title of the chart
- units: '%', // the units of the chart dimensions
- family: 'autonomy', // the family of the chart
- context: 'fronius.autonomy', // the context of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Autonomy", // the title of the chart
+ units: "%", // the units of the chart dimensions
+ family: "autonomy", // the family of the chart
+ context: "fronius.autonomy", // the context of the chart
type: netdata.chartTypes.area, // the type of the chart
priority: fronius.base_priority + 3, // the priority relative to others in the same family
update_every: service.update_every, // the expected update frequency of the chart
@@ -118,21 +121,22 @@ var fronius = {
},
// Gets the site energy chart for today. Will be created if not existing.
- getSiteEnergyTodayChart: function (service, chartId) {
+ getSiteEnergyTodayChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
var chart = fronius.charts[chartId];
if (fronius.isDefined(chart)) return chart;
var dim = {};
- dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "Today", 1000);
+ dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "today", 1000);
chart = {
id: chartId, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Energy production for today', // the title of the chart
- units: 'kWh', // the units of the chart dimensions
- family: 'energy', // the family of the chart
- context: 'fronius.energy.today', // the context of the chart
- type: netdata.chartTypes.area, // the type of the chart
- priority: fronius.base_priority + 4, // the priority relative to others in the same family
- update_every: service.update_every, // the expected update frequency of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for today",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.today", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 4, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
dimensions: dim
};
chart = service.chart(chartId, chart);
@@ -142,21 +146,22 @@ var fronius = {
},
// Gets the site energy chart for this year. Will be created if not existing.
- getSiteEnergyYearChart: function (service, chartId) {
+ getSiteEnergyYearChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
var chart = fronius.charts[chartId];
if (fronius.isDefined(chart)) return chart;
var dim = {};
- dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "Year", 1000);
+ dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "year", 1000);
chart = {
- id: chartId, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Energy production for this year', // the title of the chart
- units: 'kWh', // the units of the chart dimensions
- family: 'energy', // the family of the chart
- context: 'fronius.energy.year', // the context of the chart
- type: netdata.chartTypes.area, // the type of the chart
- priority: fronius.base_priority + 5, // the priority relative to others in the same family
- update_every: service.update_every, // the expected update frequency of the chart
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for this year",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.year", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 5, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
dimensions: dim
};
chart = service.chart(chartId, chart);
@@ -167,35 +172,30 @@ var fronius = {
// Gets the inverter power chart. Will be created if not existing.
// Needs the array of inverters in order to create a chart with all inverters as dimensions
- getInverterPowerChart: function (service, chartId, inverters) {
-
+ getInverterPowerChart: function (service, suffix, inverters) {
+ var chartId = this.getChartId(service, suffix);
var chart = fronius.charts[chartId];
if (fronius.isDefined(chart)) return chart;
var dim = {};
-
- var inverterCount = Object.keys(inverters).length;
- var inverter = inverters[inverterCount.toString()];
- var i = 1;
- for (i; i <= inverterCount; i++) {
- if (fronius.isUndefined(inverter)) {
- netdata.error("Expected an Inverter with a numerical name! " +
- "Have a look at your JSON output to verify.");
- continue;
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ var name = key;
+ if (!isNaN(key)) name = "inverter_" + key;
+ dim[key] = this.createBasicDimension("inverter_" + key, name, 1);
}
- dim[i.toString()] = this.createBasicDimension("inverter_" + i, "Inverter " + i, 1);
}
chart = {
- id: chartId, // the unique id of the chart
- name: '', // the unique name of the chart
- title: service.name + ' Current Inverter Output', // the title of the chart
- units: 'W', // the units of the chart dimensions
- family: 'inverters', // the family of the chart
- context: 'fronius.inverter.output', // the context of the chart
- type: netdata.chartTypes.stacked, // the type of the chart
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Inverter Output",// the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "inverters", // the family of the chart
+ context: "fronius.inverter.output", // the context of the chart
+ type: netdata.chartTypes.stacked, // the type of the chart
priority: fronius.base_priority + 6, // the priority relative to others in the same family
- update_every: service.update_every, // the expected update frequency of the chart
+ update_every: service.update_every, // the expected update frequency of the chart
dimensions: dim
};
chart = service.chart(chartId, chart);
@@ -205,62 +205,126 @@ var fronius = {
},
processResponse: function (service, content) {
- if (content === null) return;
- var json = JSON.parse(content);
- if (!fronius.isResponseValid(json)) return;
+ var json = fronius.convertToJson(content);
+ if (json === null) return;
// add the service
service.commit();
+ var chartDefinitions = fronius.parseCharts(service, json);
+ var chartCount = chartDefinitions.length;
+ while (chartCount--) {
+ var chartObj = chartDefinitions[chartCount];
+ service.begin(chartObj.chart);
+ var dimCount = chartObj.dimensions.length;
+ while (dimCount--) {
+ var dim = chartObj.dimensions[dimCount];
+ service.set(dim.name, dim.value);
+ }
+ service.end();
+ }
+ },
+
+ parseCharts: function (service, json) {
var site = json.Body.Data.Site;
+ return [
+ this.parsePowerChart(service, site),
+ this.parseConsumptionChart(service, site),
+ this.parseAutonomyChart(service, site),
+ this.parseEnergyTodayChart(service, site),
+ this.parseEnergyYearChart(service, site),
+ this.parseInverterChart(service, json.Body.Data.Inverters)
+ ];
+ },
- // Site Current Power Chart
- service.begin(fronius.getSitePowerChart(service, 'fronius_' + service.name + '.power'));
- service.set(fronius.powerGridId, Math.round(site.P_Grid));
- service.set(fronius.powerPvId, Math.round(site.P_PV));
- service.set(fronius.powerAccuId, Math.round(site.P_Akku));
- service.end();
-
- // Site Consumption Chart
- service.begin(fronius.getSiteConsumptionChart(service, 'fronius_' + service.name + '.consumption'));
- service.set(fronius.consumptionLoadId, Math.round(Math.abs(site.P_Load)));
- service.end();
-
- // Site Autonomy Chart
- service.begin(fronius.getSiteAutonomyChart(service, 'fronius_' + service.name + '.autonomy'));
- service.set(fronius.autonomyId, Math.round(site.rel_Autonomy));
+ parsePowerChart: function (service, site) {
+ return this.getChart(this.getSitePowerChart(service, "power"),
+ [
+ this.getDimension(this.powerGridId, Math.round(site.P_Grid)),
+ this.getDimension(this.powerPvId, Math.round(Math.max(site.P_PV, 0))),
+ this.getDimension(this.powerAccuId, Math.round(site.P_Akku))
+ ]
+ );
+ },
+
+ parseConsumptionChart: function (service, site) {
+ return this.getChart(this.getSiteConsumptionChart(service, "consumption"),
+ [this.getDimension(this.consumptionLoadId, Math.round(Math.abs(site.P_Load)))]
+ );
+ },
+
+ parseAutonomyChart: function (service, site) {
var selfConsumption = site.rel_SelfConsumption;
- service.set(fronius.consumptionSelfId, Math.round(selfConsumption === null ? 100 : selfConsumption));
- service.end();
-
- // Site Energy Today Chart
- service.begin(fronius.getSiteEnergyTodayChart(service, 'fronius_' + service.name + '.energy.today'));
- service.set(fronius.energyTodayId, Math.round(site.E_Day));
- service.end();
-
- // Site Energy Year Chart
- service.begin(fronius.getSiteEnergyYearChart(service, 'fronius_' + service.name + '.energy.year'));
- service.set(fronius.energyYearId, Math.round(site.E_Year));
- service.end();
-
- // Inverters
- var inverters = json.Body.Data.Inverters;
- var inverterCount = Object.keys(inverters).length + 1;
- while (inverterCount--) {
- var inverter = inverters[inverterCount];
- if (fronius.isUndefined(inverter)) continue;
- service.begin(fronius.getInverterPowerChart(service, 'fronius_' + service.name + '.inverters.output', inverters));
- service.set(inverterCount.toString(), Math.round(inverter.P));
- service.end();
+ return this.getChart(this.getSiteAutonomyChart(service, "autonomy"),
+ [
+ this.getDimension(this.autonomyId, Math.round(site.rel_Autonomy)),
+ this.getDimension(this.consumptionSelfId, Math.round(selfConsumption === null ? 100 : selfConsumption))
+ ]
+ );
+ },
+
+ parseEnergyTodayChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyTodayChart(service, "energy.today"),
+ [this.getDimension(this.energyTodayId, Math.round(Math.max(site.E_Day, 0)))]
+ );
+ },
+
+ parseEnergyYearChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyYearChart(service, "energy.year"),
+ [this.getDimension(this.energyYearId, Math.round(Math.max(site.E_Year, 0)))]
+ );
+ },
+
+ parseInverterChart: function (service, inverters) {
+ var dimensions = [];
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ dimensions.push(this.getDimension(key, Math.round(inverters[key].P)));
+ }
+ }
+ return this.getChart(this.getInverterPowerChart(service, "inverters.output", inverters), dimensions);
+ },
+
+ getDimension: function (name, value) {
+ return {
+ name: name,
+ value: value
+ };
+ },
+
+ getChart: function (chart, dimensions) {
+ return {
+ chart: chart,
+ dimensions: dimensions
+ };
+ },
+
+ getChartId: function (service, suffix) {
+ return "fronius_" + service.name + "." + suffix;
+ },
+
+ convertToJson: function (httpBody) {
+ if (httpBody === null) return null;
+ var json = httpBody;
+ // don't parse if it's already a JSON object;
+ // the check enables easier testing when the httpBody is already parsed.
+ if (typeof httpBody !== "object") {
+ try {
+ json = JSON.parse(httpBody);
+ } catch (error) {
+ netdata.error("fronius: Got a response, but it is not valid JSON. Ignoring. Error: " + error.message);
+ return null;
+ }
}
+ return this.isResponseValid(json) ? json : null;
},
// some basic validation
isResponseValid: function (json) {
- if (fronius.isUndefined(json.Body)) return false;
- if (fronius.isUndefined(json.Body.Data)) return false;
- if (fronius.isUndefined(json.Body.Data.Site)) return false;
- return fronius.isDefined(json.Body.Data.Inverters);
+ if (this.isUndefined(json.Body)) return false;
+ if (this.isUndefined(json.Body.Data)) return false;
+ if (this.isUndefined(json.Body.Data.Site)) return false;
+ return this.isDefined(json.Body.Data.Inverters);
},
// module.serviceExecute()
@@ -268,11 +332,11 @@ var fronius = {
// its purpose is to prepare the request and call
// netdata.serviceExecute()
serviceExecute: function (name, uri, update_every) {
- netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every);
+ netdata.debug(this.name + ": " + name + ": url: " + uri + ", update_every: " + update_every);
var service = netdata.service({
name: name,
- request: netdata.requestFromURL('http://' + uri),
+ request: netdata.requestFromURL("http://" + uri),
update_every: update_every,
module: this
});
@@ -287,6 +351,7 @@ var fronius = {
while (len--) {
var server = config.servers[len];
if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every;
+ if (fronius.areUndefined([server.name, server.hostname, server.api_path])) continue;
var url = server.hostname + server.api_path;
this.serviceExecute(server.name, url, server.update_every);
@@ -306,11 +371,19 @@ var fronius = {
},
isUndefined: function (value) {
- return typeof value === 'undefined';
+ return typeof value === "undefined";
+ },
+
+ areUndefined: function (valueArray) {
+ var i = 0;
+ for (i; i < valueArray.length; i++) {
+ if (this.isUndefined(valueArray[i])) return true;
+ }
+ return false;
},
isDefined: function (value) {
- return typeof value !== 'undefined';
+ return typeof value !== "undefined";
}
};
diff --git a/node.d/node_modules/asn1-ber.js b/node.d/node_modules/asn1-ber.js
new file mode 100644
index 000000000..57809f486
--- /dev/null
+++ b/node.d/node_modules/asn1-ber.js
@@ -0,0 +1,6 @@
+
+var Ber = require('./lib/ber/index')
+
+exports.Ber = Ber
+exports.BerReader = Ber.Reader
+exports.BerWriter = Ber.Writer
diff --git a/node.d/node_modules/asn1.js b/node.d/node_modules/asn1.js
deleted file mode 100644
index d1766e7a6..000000000
--- a/node.d/node_modules/asn1.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-// If you have no idea what ASN.1 or BER is, see this:
-// ftp://ftp.rsa.com/pub/pkcs/ascii/layman.asc
-
-var Ber = require('./ber/index');
-
-
-
-///--- Exported API
-
-module.exports = {
-
- Ber: Ber,
-
- BerReader: Ber.Reader,
-
- BerWriter: Ber.Writer
-
-};
diff --git a/node.d/node_modules/ber/errors.js b/node.d/node_modules/ber/errors.js
deleted file mode 100644
index ff21d4fab..000000000
--- a/node.d/node_modules/ber/errors.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-
-module.exports = {
-
- newInvalidAsn1Error: function(msg) {
- var e = new Error();
- e.name = 'InvalidAsn1Error';
- e.message = msg || '';
- return e;
- }
-
-};
diff --git a/node.d/node_modules/ber/index.js b/node.d/node_modules/ber/index.js
deleted file mode 100644
index 4fb90aea9..000000000
--- a/node.d/node_modules/ber/index.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-var errors = require('./errors');
-var types = require('./types');
-
-var Reader = require('./reader');
-var Writer = require('./writer');
-
-
-///--- Exports
-
-module.exports = {
-
- Reader: Reader,
-
- Writer: Writer
-
-};
-
-for (var t in types) {
- if (types.hasOwnProperty(t))
- module.exports[t] = types[t];
-}
-for (var e in errors) {
- if (errors.hasOwnProperty(e))
- module.exports[e] = errors[e];
-}
diff --git a/node.d/node_modules/ber/reader.js b/node.d/node_modules/ber/reader.js
deleted file mode 100644
index 0a00e98e3..000000000
--- a/node.d/node_modules/ber/reader.js
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-var assert = require('assert');
-
-var ASN1 = require('./types');
-var errors = require('./errors');
-
-
-///--- Globals
-
-var newInvalidAsn1Error = errors.newInvalidAsn1Error;
-
-
-
-///--- API
-
-function Reader(data) {
- if (!data || !Buffer.isBuffer(data))
- throw new TypeError('data must be a node Buffer');
-
- this._buf = data;
- this._size = data.length;
-
- // These hold the "current" state
- this._len = 0;
- this._offset = 0;
-}
-
-Object.defineProperty(Reader.prototype, 'length', {
- enumerable: true,
- get: function () { return (this._len); }
-});
-
-Object.defineProperty(Reader.prototype, 'offset', {
- enumerable: true,
- get: function () { return (this._offset); }
-});
-
-Object.defineProperty(Reader.prototype, 'remain', {
- get: function () { return (this._size - this._offset); }
-});
-
-Object.defineProperty(Reader.prototype, 'buffer', {
- get: function () { return (this._buf.slice(this._offset)); }
-});
-
-
-/**
- * Reads a single byte and advances offset; you can pass in `true` to make this
- * a "peek" operation (i.e., get the byte, but don't advance the offset).
- *
- * @param {Boolean} peek true means don't move offset.
- * @return {Number} the next byte, null if not enough data.
- */
-Reader.prototype.readByte = function(peek) {
- if (this._size - this._offset < 1)
- return null;
-
- var b = this._buf[this._offset] & 0xff;
-
- if (!peek)
- this._offset += 1;
-
- return b;
-};
-
-
-Reader.prototype.peek = function() {
- return this.readByte(true);
-};
-
-
-/**
- * Reads a (potentially) variable length off the BER buffer. This call is
- * not really meant to be called directly, as callers have to manipulate
- * the internal buffer afterwards.
- *
- * As a result of this call, you can call `Reader.length`, until the
- * next thing called that does a readLength.
- *
- * @return {Number} the amount of offset to advance the buffer.
- * @throws {InvalidAsn1Error} on bad ASN.1
- */
-Reader.prototype.readLength = function(offset) {
- if (offset === undefined)
- offset = this._offset;
-
- if (offset >= this._size)
- return null;
-
- var lenB = this._buf[offset++] & 0xff;
- if (lenB === null)
- return null;
-
- if ((lenB & 0x80) == 0x80) {
- lenB &= 0x7f;
-
- if (lenB == 0)
- throw newInvalidAsn1Error('Indefinite length not supported');
-
- if (lenB > 4)
- throw newInvalidAsn1Error('encoding too long');
-
- if (this._size - offset < lenB)
- return null;
-
- this._len = 0;
- for (var i = 0; i < lenB; i++)
- this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
-
- } else {
- // Wasn't a variable length
- this._len = lenB;
- }
-
- return offset;
-};
-
-
-/**
- * Parses the next sequence in this BER buffer.
- *
- * To get the length of the sequence, call `Reader.length`.
- *
- * @return {Number} the sequence's tag.
- */
-Reader.prototype.readSequence = function(tag) {
- var seq = this.peek();
- if (seq === null)
- return null;
- if (tag !== undefined && tag !== seq)
- throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) +
- ': got 0x' + seq.toString(16));
-
- var o = this.readLength(this._offset + 1); // stored in `length`
- if (o === null)
- return null;
-
- this._offset = o;
- return seq;
-};
-
-
-Reader.prototype.readInt = function() {
- return this._readTag(ASN1.Integer);
-};
-
-
-Reader.prototype.readBoolean = function() {
- return (this._readTag(ASN1.Boolean) === 0 ? false : true);
-};
-
-
-Reader.prototype.readEnumeration = function() {
- return this._readTag(ASN1.Enumeration);
-};
-
-
-Reader.prototype.readString = function(tag, retbuf) {
- if (!tag)
- tag = ASN1.OctetString;
-
- var b = this.peek();
- if (b === null)
- return null;
-
- if (b !== tag)
- throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) +
- ': got 0x' + b.toString(16));
-
- var o = this.readLength(this._offset + 1); // stored in `length`
-
- if (o === null)
- return null;
-
- if (this.length > this._size - o)
- return null;
-
- this._offset = o;
-
- if (this.length === 0)
- return retbuf ? new Buffer(0) : '';
-
- var str = this._buf.slice(this._offset, this._offset + this.length);
- this._offset += this.length;
-
- return retbuf ? str : str.toString('utf8');
-};
-
-Reader.prototype.readOID = function(tag) {
- if (!tag)
- tag = ASN1.OID;
-
- var b = this.readString(tag, true);
- if (b === null)
- return null;
-
- var values = [];
- var value = 0;
-
- for (var i = 0; i < b.length; i++) {
- var byte = b[i] & 0xff;
-
- value <<= 7;
- value += byte & 0x7f;
- if ((byte & 0x80) == 0) {
- values.push(value);
- value = 0;
- }
- }
-
- value = values.shift();
- values.unshift(value % 40);
- values.unshift((value / 40) >> 0);
-
- return values.join('.');
-};
-
-
-Reader.prototype._readTag = function(tag) {
- assert.ok(tag !== undefined);
-
- var b = this.peek();
-
- if (b === null)
- return null;
-
- if (b !== tag)
- throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) +
- ': got 0x' + b.toString(16));
-
- var o = this.readLength(this._offset + 1); // stored in `length`
- if (o === null)
- return null;
-
- if (this.length > 4)
- throw newInvalidAsn1Error('Integer too long: ' + this.length);
-
- if (this.length > this._size - o)
- return null;
- this._offset = o;
-
- var fb = this._buf[this._offset];
- var value = 0;
-
- for (var i = 0; i < this.length; i++) {
- value <<= 8;
- value |= (this._buf[this._offset++] & 0xff);
- }
-
- if ((fb & 0x80) == 0x80 && i !== 4)
- value -= (1 << (i * 8));
-
- return value >> 0;
-};
-
-
-
-///--- Exported API
-
-module.exports = Reader;
diff --git a/node.d/node_modules/ber/types.js b/node.d/node_modules/ber/types.js
deleted file mode 100644
index 8aea00013..000000000
--- a/node.d/node_modules/ber/types.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-
-module.exports = {
- EOC: 0,
- Boolean: 1,
- Integer: 2,
- BitString: 3,
- OctetString: 4,
- Null: 5,
- OID: 6,
- ObjectDescriptor: 7,
- External: 8,
- Real: 9, // float
- Enumeration: 10,
- PDV: 11,
- Utf8String: 12,
- RelativeOID: 13,
- Sequence: 16,
- Set: 17,
- NumericString: 18,
- PrintableString: 19,
- T61String: 20,
- VideotexString: 21,
- IA5String: 22,
- UTCTime: 23,
- GeneralizedTime: 24,
- GraphicString: 25,
- VisibleString: 26,
- GeneralString: 28,
- UniversalString: 29,
- CharacterString: 30,
- BMPString: 31,
- Constructor: 32,
- Context: 128
-};
diff --git a/node.d/node_modules/ber/writer.js b/node.d/node_modules/ber/writer.js
deleted file mode 100644
index d9d99af68..000000000
--- a/node.d/node_modules/ber/writer.js
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2011 Mark Cavage <mcavage@gmail.com> All rights reserved.
-
-var assert = require('assert');
-var ASN1 = require('./types');
-var errors = require('./errors');
-
-
-///--- Globals
-
-var newInvalidAsn1Error = errors.newInvalidAsn1Error;
-
-var DEFAULT_OPTS = {
- size: 1024,
- growthFactor: 8
-};
-
-
-///--- Helpers
-
-function merge(from, to) {
- assert.ok(from);
- assert.equal(typeof(from), 'object');
- assert.ok(to);
- assert.equal(typeof(to), 'object');
-
- var keys = Object.getOwnPropertyNames(from);
- keys.forEach(function(key) {
- if (to[key])
- return;
-
- var value = Object.getOwnPropertyDescriptor(from, key);
- Object.defineProperty(to, key, value);
- });
-
- return to;
-}
-
-
-
-///--- API
-
-function Writer(options) {
- options = merge(DEFAULT_OPTS, options || {});
-
- this._buf = new Buffer(options.size || 1024);
- this._size = this._buf.length;
- this._offset = 0;
- this._options = options;
-
- // A list of offsets in the buffer where we need to insert
- // sequence tag/len pairs.
- this._seq = [];
-}
-
-Object.defineProperty(Writer.prototype, 'buffer', {
- get: function () {
- if (this._seq.length)
- throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)');
-
- return (this._buf.slice(0, this._offset));
- }
-});
-
-Writer.prototype.writeByte = function(b) {
- if (typeof(b) !== 'number')
- throw new TypeError('argument must be a Number');
-
- this._ensure(1);
- this._buf[this._offset++] = b;
-};
-
-
-Writer.prototype.writeInt = function(i, tag) {
- if (typeof(i) !== 'number')
- throw new TypeError('argument must be a Number');
- if (typeof(tag) !== 'number')
- tag = ASN1.Integer;
-
- var sz = 4;
-
- while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) &&
- (sz > 1)) {
- sz--;
- i <<= 8;
- }
-
- if (sz > 4)
- throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff');
-
- this._ensure(2 + sz);
- this._buf[this._offset++] = tag;
- this._buf[this._offset++] = sz;
-
- while (sz-- > 0) {
- this._buf[this._offset++] = ((i & 0xff000000) >>> 24);
- i <<= 8;
- }
-
-};
-
-
-Writer.prototype.writeNull = function() {
- this.writeByte(ASN1.Null);
- this.writeByte(0x00);
-};
-
-
-Writer.prototype.writeEnumeration = function(i, tag) {
- if (typeof(i) !== 'number')
- throw new TypeError('argument must be a Number');
- if (typeof(tag) !== 'number')
- tag = ASN1.Enumeration;
-
- return this.writeInt(i, tag);
-};
-
-
-Writer.prototype.writeBoolean = function(b, tag) {
- if (typeof(b) !== 'boolean')
- throw new TypeError('argument must be a Boolean');
- if (typeof(tag) !== 'number')
- tag = ASN1.Boolean;
-
- this._ensure(3);
- this._buf[this._offset++] = tag;
- this._buf[this._offset++] = 0x01;
- this._buf[this._offset++] = b ? 0xff : 0x00;
-};
-
-
-Writer.prototype.writeString = function(s, tag) {
- if (typeof(s) !== 'string')
- throw new TypeError('argument must be a string (was: ' + typeof(s) + ')');
- if (typeof(tag) !== 'number')
- tag = ASN1.OctetString;
-
- var len = Buffer.byteLength(s);
- this.writeByte(tag);
- this.writeLength(len);
- if (len) {
- this._ensure(len);
- this._buf.write(s, this._offset);
- this._offset += len;
- }
-};
-
-
-Writer.prototype.writeBuffer = function(buf, tag) {
- if (typeof(tag) !== 'number')
- throw new TypeError('tag must be a number');
- if (!Buffer.isBuffer(buf))
- throw new TypeError('argument must be a buffer');
-
- this.writeByte(tag);
- this.writeLength(buf.length);
- this._ensure(buf.length);
- buf.copy(this._buf, this._offset, 0, buf.length);
- this._offset += buf.length;
-};
-
-
-Writer.prototype.writeStringArray = function(strings) {
- if ((!strings instanceof Array))
- throw new TypeError('argument must be an Array[String]');
-
- var self = this;
- strings.forEach(function(s) {
- self.writeString(s);
- });
-};
-
-// This is really to solve DER cases, but whatever for now
-Writer.prototype.writeOID = function(s, tag) {
- if (typeof(s) !== 'string')
- throw new TypeError('argument must be a string');
- if (typeof(tag) !== 'number')
- tag = ASN1.OID;
-
- if (!/^([0-9]+\.){3,}[0-9]+$/.test(s))
- throw new Error('argument is not a valid OID string');
-
- function encodeOctet(bytes, octet) {
- if (octet < 128) {
- bytes.push(octet);
- } else if (octet < 16384) {
- bytes.push((octet >>> 7) | 0x80);
- bytes.push(octet & 0x7F);
- } else if (octet < 2097152) {
- bytes.push((octet >>> 14) | 0x80);
- bytes.push(((octet >>> 7) | 0x80) & 0xFF);
- bytes.push(octet & 0x7F);
- } else if (octet < 268435456) {
- bytes.push((octet >>> 21) | 0x80);
- bytes.push(((octet >>> 14) | 0x80) & 0xFF);
- bytes.push(((octet >>> 7) | 0x80) & 0xFF);
- bytes.push(octet & 0x7F);
- } else {
- bytes.push(((octet >>> 28) | 0x80) & 0xFF);
- bytes.push(((octet >>> 21) | 0x80) & 0xFF);
- bytes.push(((octet >>> 14) | 0x80) & 0xFF);
- bytes.push(((octet >>> 7) | 0x80) & 0xFF);
- bytes.push(octet & 0x7F);
- }
- }
-
- var tmp = s.split('.');
- var bytes = [];
- bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10));
- tmp.slice(2).forEach(function(b) {
- encodeOctet(bytes, parseInt(b, 10));
- });
-
- var self = this;
- this._ensure(2 + bytes.length);
- this.writeByte(tag);
- this.writeLength(bytes.length);
- bytes.forEach(function(b) {
- self.writeByte(b);
- });
-};
-
-
-Writer.prototype.writeLength = function(len) {
- if (typeof(len) !== 'number')
- throw new TypeError('argument must be a Number');
-
- this._ensure(4);
-
- if (len <= 0x7f) {
- this._buf[this._offset++] = len;
- } else if (len <= 0xff) {
- this._buf[this._offset++] = 0x81;
- this._buf[this._offset++] = len;
- } else if (len <= 0xffff) {
- this._buf[this._offset++] = 0x82;
- this._buf[this._offset++] = len >> 8;
- this._buf[this._offset++] = len;
- } else if (len <= 0xffffff) {
- this._buf[this._offset++] = 0x83;
- this._buf[this._offset++] = len >> 16;
- this._buf[this._offset++] = len >> 8;
- this._buf[this._offset++] = len;
- } else {
- throw new InvalidAsn1ERror('Length too long (> 4 bytes)');
- }
-};
-
-Writer.prototype.startSequence = function(tag) {
- if (typeof(tag) !== 'number')
- tag = ASN1.Sequence | ASN1.Constructor;
-
- this.writeByte(tag);
- this._seq.push(this._offset);
- this._ensure(3);
- this._offset += 3;
-};
-
-
-Writer.prototype.endSequence = function() {
- var seq = this._seq.pop();
- var start = seq + 3;
- var len = this._offset - start;
-
- if (len <= 0x7f) {
- this._shift(start, len, -2);
- this._buf[seq] = len;
- } else if (len <= 0xff) {
- this._shift(start, len, -1);
- this._buf[seq] = 0x81;
- this._buf[seq + 1] = len;
- } else if (len <= 0xffff) {
- this._buf[seq] = 0x82;
- this._buf[seq + 1] = len >> 8;
- this._buf[seq + 2] = len;
- } else if (len <= 0xffffff) {
- this._shift(start, len, 1);
- this._buf[seq] = 0x83;
- this._buf[seq + 1] = len >> 16;
- this._buf[seq + 2] = len >> 8;
- this._buf[seq + 3] = len;
- } else {
- throw new InvalidAsn1Error('Sequence too long');
- }
-};
-
-
-Writer.prototype._shift = function(start, len, shift) {
- assert.ok(start !== undefined);
- assert.ok(len !== undefined);
- assert.ok(shift);
-
- this._buf.copy(this._buf, start + shift, start, start + len);
- this._offset += shift;
-};
-
-Writer.prototype._ensure = function(len) {
- assert.ok(len);
-
- if (this._size - this._offset < len) {
- var sz = this._size * this._options.growthFactor;
- if (sz - this._offset < len)
- sz += len;
-
- var buf = new Buffer(sz);
-
- this._buf.copy(buf, 0, 0, this._offset);
- this._buf = buf;
- this._size = sz;
- }
-};
-
-
-
-///--- Exported API
-
-module.exports = Writer;
diff --git a/node.d/node_modules/lib/ber/errors.js b/node.d/node_modules/lib/ber/errors.js
new file mode 100644
index 000000000..0106747e6
--- /dev/null
+++ b/node.d/node_modules/lib/ber/errors.js
@@ -0,0 +1,9 @@
+
+module.exports = {
+ InvalidAsn1Error: function(msg) {
+ var e = new Error()
+ e.name = 'InvalidAsn1Error'
+ e.message = msg || ''
+ return e
+ }
+}
diff --git a/node.d/node_modules/lib/ber/index.js b/node.d/node_modules/lib/ber/index.js
new file mode 100644
index 000000000..65985c1e1
--- /dev/null
+++ b/node.d/node_modules/lib/ber/index.js
@@ -0,0 +1,17 @@
+
+var errors = require('./errors')
+var types = require('./types')
+
+var Reader = require('./reader')
+var Writer = require('./writer')
+
+for (var t in types)
+ if (types.hasOwnProperty(t))
+ exports[t] = types[t]
+
+for (var e in errors)
+ if (errors.hasOwnProperty(e))
+ exports[e] = errors[e]
+
+exports.Reader = Reader
+exports.Writer = Writer
diff --git a/node.d/node_modules/lib/ber/reader.js b/node.d/node_modules/lib/ber/reader.js
new file mode 100644
index 000000000..f93d829aa
--- /dev/null
+++ b/node.d/node_modules/lib/ber/reader.js
@@ -0,0 +1,269 @@
+
+var assert = require('assert');
+
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+
+
+///--- API
+
+function Reader(data) {
+ if (!data || !Buffer.isBuffer(data))
+ throw new TypeError('data must be a node Buffer');
+
+ this._buf = data;
+ this._size = data.length;
+
+ // These hold the "current" state
+ this._len = 0;
+ this._offset = 0;
+}
+
+Object.defineProperty(Reader.prototype, 'length', {
+ enumerable: true,
+ get: function () { return (this._len); }
+});
+
+Object.defineProperty(Reader.prototype, 'offset', {
+ enumerable: true,
+ get: function () { return (this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'remain', {
+ get: function () { return (this._size - this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'buffer', {
+ get: function () { return (this._buf.slice(this._offset)); }
+});
+
+
+/**
+ * Reads a single byte and advances offset; you can pass in `true` to make this
+ * a "peek" operation (i.e., get the byte, but don't advance the offset).
+ *
+ * @param {Boolean} peek true means don't move offset.
+ * @return {Number} the next byte, null if not enough data.
+ */
+Reader.prototype.readByte = function(peek) {
+ if (this._size - this._offset < 1)
+ return null;
+
+ var b = this._buf[this._offset] & 0xff;
+
+ if (!peek)
+ this._offset += 1;
+
+ return b;
+};
+
+
+Reader.prototype.peek = function() {
+ return this.readByte(true);
+};
+
+
+/**
+ * Reads a (potentially) variable length off the BER buffer. This call is
+ * not really meant to be called directly, as callers have to manipulate
+ * the internal buffer afterwards.
+ *
+ * As a result of this call, you can call `Reader.length`, until the
+ * next thing called that does a readLength.
+ *
+ * @return {Number} the amount of offset to advance the buffer.
+ * @throws {InvalidAsn1Error} on bad ASN.1
+ */
+Reader.prototype.readLength = function(offset) {
+ if (offset === undefined)
+ offset = this._offset;
+
+ if (offset >= this._size)
+ return null;
+
+ var lenB = this._buf[offset++] & 0xff;
+ if (lenB === null)
+ return null;
+
+ if ((lenB & 0x80) == 0x80) {
+ lenB &= 0x7f;
+
+ if (lenB == 0)
+ throw InvalidAsn1Error('Indefinite length not supported');
+
+ if (lenB > 4)
+ throw InvalidAsn1Error('encoding too long');
+
+ if (this._size - offset < lenB)
+ return null;
+
+ this._len = 0;
+ for (var i = 0; i < lenB; i++)
+ this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
+
+ } else {
+ // Wasn't a variable length
+ this._len = lenB;
+ }
+
+ return offset;
+};
+
+
+/**
+ * Parses the next sequence in this BER buffer.
+ *
+ * To get the length of the sequence, call `Reader.length`.
+ *
+ * @return {Number} the sequence's tag.
+ */
+Reader.prototype.readSequence = function(tag) {
+ var seq = this.peek();
+ if (seq === null)
+ return null;
+ if (tag !== undefined && tag !== seq)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + seq.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ this._offset = o;
+ return seq;
+};
+
+
+Reader.prototype.readInt = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+ return this._readTag(ASN1.Integer);
+};
+
+
+Reader.prototype.readBoolean = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ return (this._readTag(tag) === 0 ? false : true);
+};
+
+
+Reader.prototype.readEnumeration = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+ return this._readTag(ASN1.Enumeration);
+};
+
+
+Reader.prototype.readString = function(tag, retbuf) {
+ if (!tag)
+ tag = ASN1.OctetString;
+
+ var b = this.peek();
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+
+ if (o === null)
+ return null;
+
+ if (this.length > this._size - o)
+ return null;
+
+ this._offset = o;
+
+ if (this.length === 0)
+ return retbuf ? new Buffer(0) : '';
+
+ var str = this._buf.slice(this._offset, this._offset + this.length);
+ this._offset += this.length;
+
+ return retbuf ? str : str.toString('utf8');
+};
+
+Reader.prototype.readOID = function(tag) {
+ if (!tag)
+ tag = ASN1.OID;
+
+ var b = this.readString(tag, true);
+ if (b === null)
+ return null;
+
+ var values = [];
+ var value = 0;
+
+ for (var i = 0; i < b.length; i++) {
+ var byte = b[i] & 0xff;
+
+ value <<= 7;
+ value += byte & 0x7f;
+ if ((byte & 0x80) == 0) {
+ values.push(value >>> 0);
+ value = 0;
+ }
+ }
+
+ value = values.shift();
+ values.unshift(value % 40);
+ values.unshift((value / 40) >> 0);
+
+ return values.join('.');
+};
+
+
+Reader.prototype._readTag = function(tag) {
+ assert.ok(tag !== undefined);
+
+ var b = this.peek();
+
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ if (this.length > 4)
+ throw InvalidAsn1Error('Integer too long: ' + this.length);
+
+ if (this.length > this._size - o)
+ return null;
+ this._offset = o;
+
+ var fb = this._buf[this._offset];
+ var value = 0;
+
+ for (var i = 0; i < this.length; i++) {
+ value <<= 8;
+ value |= (this._buf[this._offset++] & 0xff);
+ }
+
+ if ((fb & 0x80) == 0x80 && i !== 4)
+ value -= (1 << (i * 8));
+
+ return value >> 0;
+};
+
+
+
+///--- Exported API
+
+module.exports = Reader;
diff --git a/node.d/node_modules/lib/ber/types.js b/node.d/node_modules/lib/ber/types.js
new file mode 100644
index 000000000..345824bb1
--- /dev/null
+++ b/node.d/node_modules/lib/ber/types.js
@@ -0,0 +1,34 @@
+
+module.exports = {
+ EOC: 0,
+ Boolean: 1,
+ Integer: 2,
+ BitString: 3,
+ OctetString: 4,
+ Null: 5,
+ OID: 6,
+ ObjectDescriptor: 7,
+ External: 8,
+ Real: 9,
+ Enumeration: 10,
+ PDV: 11,
+ Utf8String: 12,
+ RelativeOID: 13,
+ Sequence: 16,
+ Set: 17,
+ NumericString: 18,
+ PrintableString: 19,
+ T61String: 20,
+ VideotexString: 21,
+ IA5String: 22,
+ UTCTime: 23,
+ GeneralizedTime: 24,
+ GraphicString: 25,
+ VisibleString: 26,
+ GeneralString: 28,
+ UniversalString: 29,
+ CharacterString: 30,
+ BMPString: 31,
+ Constructor: 32,
+ Context: 128
+}
diff --git a/node.d/node_modules/lib/ber/writer.js b/node.d/node_modules/lib/ber/writer.js
new file mode 100644
index 000000000..bf9805886
--- /dev/null
+++ b/node.d/node_modules/lib/ber/writer.js
@@ -0,0 +1,317 @@
+
+var assert = require('assert');
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+var DEFAULT_OPTS = {
+ size: 1024,
+ growthFactor: 8
+};
+
+
+///--- Helpers
+
+function merge(from, to) {
+ assert.ok(from);
+ assert.equal(typeof(from), 'object');
+ assert.ok(to);
+ assert.equal(typeof(to), 'object');
+
+ var keys = Object.getOwnPropertyNames(from);
+ keys.forEach(function(key) {
+ if (to[key])
+ return;
+
+ var value = Object.getOwnPropertyDescriptor(from, key);
+ Object.defineProperty(to, key, value);
+ });
+
+ return to;
+}
+
+
+
+///--- API
+
+function Writer(options) {
+ options = merge(DEFAULT_OPTS, options || {});
+
+ this._buf = new Buffer(options.size || 1024);
+ this._size = this._buf.length;
+ this._offset = 0;
+ this._options = options;
+
+ // A list of offsets in the buffer where we need to insert
+ // sequence tag/len pairs.
+ this._seq = [];
+}
+
+Object.defineProperty(Writer.prototype, 'buffer', {
+ get: function () {
+ if (this._seq.length)
+ throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)');
+
+ return (this._buf.slice(0, this._offset));
+ }
+});
+
+Writer.prototype.writeByte = function(b) {
+ if (typeof(b) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(1);
+ this._buf[this._offset++] = b;
+};
+
+
+Writer.prototype.writeInt = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+ var sz = 4;
+
+ while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) &&
+ (sz > 1)) {
+ sz--;
+ i <<= 8;
+ }
+
+ if (sz > 4)
+ throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff');
+
+ this._ensure(2 + sz);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = sz;
+
+ while (sz-- > 0) {
+ this._buf[this._offset++] = ((i & 0xff000000) >>> 24);
+ i <<= 8;
+ }
+
+};
+
+
+Writer.prototype.writeNull = function() {
+ this.writeByte(ASN1.Null);
+ this.writeByte(0x00);
+};
+
+
+Writer.prototype.writeEnumeration = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+ return this.writeInt(i, tag);
+};
+
+
+Writer.prototype.writeBoolean = function(b, tag) {
+ if (typeof(b) !== 'boolean')
+ throw new TypeError('argument must be a Boolean');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ this._ensure(3);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = 0x01;
+ this._buf[this._offset++] = b ? 0xff : 0x00;
+};
+
+
+Writer.prototype.writeString = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string (was: ' + typeof(s) + ')');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OctetString;
+
+ var len = Buffer.byteLength(s);
+ this.writeByte(tag);
+ this.writeLength(len);
+ if (len) {
+ this._ensure(len);
+ this._buf.write(s, this._offset);
+ this._offset += len;
+ }
+};
+
+
+Writer.prototype.writeBuffer = function(buf, tag) {
+ if (!Buffer.isBuffer(buf))
+ throw new TypeError('argument must be a buffer');
+
+ // If no tag is specified we will assume `buf` already contains tag and length
+ if (typeof(tag) === 'number') {
+ this.writeByte(tag);
+ this.writeLength(buf.length);
+ }
+
+ this._ensure(buf.length);
+ buf.copy(this._buf, this._offset, 0, buf.length);
+ this._offset += buf.length;
+};
+
+
+Writer.prototype.writeStringArray = function(strings, tag) {
+ if (! (strings instanceof Array))
+ throw new TypeError('argument must be an Array[String]');
+
+ var self = this;
+ strings.forEach(function(s) {
+ self.writeString(s, tag);
+ });
+};
+
+// This is really to solve DER cases, but whatever for now
+Writer.prototype.writeOID = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OID;
+
+ if (!/^([0-9]+\.){3,}[0-9]+$/.test(s))
+ throw new Error('argument is not a valid OID string');
+
+ function encodeOctet(bytes, octet) {
+ if (octet < 128) {
+ bytes.push(octet);
+ } else if (octet < 16384) {
+ bytes.push((octet >>> 7) | 0x80);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 2097152) {
+ bytes.push((octet >>> 14) | 0x80);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 268435456) {
+ bytes.push((octet >>> 21) | 0x80);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else {
+ bytes.push(((octet >>> 28) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 21) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ }
+ }
+
+ var tmp = s.split('.');
+ var bytes = [];
+ bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10));
+ tmp.slice(2).forEach(function(b) {
+ encodeOctet(bytes, parseInt(b, 10));
+ });
+
+ var self = this;
+ this._ensure(2 + bytes.length);
+ this.writeByte(tag);
+ this.writeLength(bytes.length);
+ bytes.forEach(function(b) {
+ self.writeByte(b);
+ });
+};
+
+
+Writer.prototype.writeLength = function(len) {
+ if (typeof(len) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(4);
+
+ if (len <= 0x7f) {
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xff) {
+ this._buf[this._offset++] = 0x81;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffff) {
+ this._buf[this._offset++] = 0x82;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffffff) {
+ this._buf[this._offset++] = 0x83;
+ this._buf[this._offset++] = len >> 16;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else {
+ throw new InvalidAsn1Error('Length too long (> 4 bytes)');
+ }
+};
+
+Writer.prototype.startSequence = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Sequence | ASN1.Constructor;
+
+ this.writeByte(tag);
+ this._seq.push(this._offset);
+ this._ensure(3);
+ this._offset += 3;
+};
+
+
+Writer.prototype.endSequence = function() {
+ var seq = this._seq.pop();
+ var start = seq + 3;
+ var len = this._offset - start;
+
+ if (len <= 0x7f) {
+ this._shift(start, len, -2);
+ this._buf[seq] = len;
+ } else if (len <= 0xff) {
+ this._shift(start, len, -1);
+ this._buf[seq] = 0x81;
+ this._buf[seq + 1] = len;
+ } else if (len <= 0xffff) {
+ this._buf[seq] = 0x82;
+ this._buf[seq + 1] = len >> 8;
+ this._buf[seq + 2] = len;
+ } else if (len <= 0xffffff) {
+ this._shift(start, len, 1);
+ this._buf[seq] = 0x83;
+ this._buf[seq + 1] = len >> 16;
+ this._buf[seq + 2] = len >> 8;
+ this._buf[seq + 3] = len;
+ } else {
+ throw new InvalidAsn1Error('Sequence too long');
+ }
+};
+
+
+Writer.prototype._shift = function(start, len, shift) {
+ assert.ok(start !== undefined);
+ assert.ok(len !== undefined);
+ assert.ok(shift);
+
+ this._buf.copy(this._buf, start + shift, start, start + len);
+ this._offset += shift;
+};
+
+Writer.prototype._ensure = function(len) {
+ assert.ok(len);
+
+ if (this._size - this._offset < len) {
+ var sz = this._size * this._options.growthFactor;
+ if (sz - this._offset < len)
+ sz += len;
+
+ var buf = new Buffer(sz);
+
+ this._buf.copy(buf, 0, 0, this._offset);
+ this._buf = buf;
+ this._size = sz;
+ }
+};
+
+
+
+///--- Exported API
+
+module.exports = Writer;
diff --git a/node.d/node_modules/net-snmp.js b/node.d/node_modules/net-snmp.js
index de5926104..ac9a8d350 100644
--- a/node.d/node_modules/net-snmp.js
+++ b/node.d/node_modules/net-snmp.js
@@ -1,7 +1,7 @@
// Copyright 2013 Stephen Vickers <stephen.vickers.sv@gmail.com>
-var ber = require ("asn1").Ber;
+var ber = require ("asn1-ber").Ber;
var dgram = require ("dgram");
var events = require ("events");
var util = require ("util");
diff --git a/node.d/node_modules/netdata.js b/node.d/node_modules/netdata.js
index 11202061e..143255d9e 100644
--- a/node.d/node_modules/netdata.js
+++ b/node.d/node_modules/netdata.js
@@ -364,12 +364,8 @@ var netdata = {
if(typeof value === 'undefined' || value === null)
return false;
- if(this._current_chart._dimensions_count !== 0) {
- if (value instanceof Buffer)
- this.queue('SET ' + dimension + ' = 0x' + value.toString('hex'));
- else
- this.queue('SET ' + dimension + ' = ' + value.toString());
- }
+ if(this._current_chart._dimensions_count !== 0)
+ this.queue('SET ' + dimension + ' = ' + value.toString());
return true;
};
diff --git a/node.d/snmp.node.js b/node.d/snmp.node.js
index 57b37ffa0..3e7027958 100644
--- a/node.d/snmp.node.js
+++ b/node.d/snmp.node.js
@@ -269,13 +269,36 @@ netdata.processors.snmp = {
failed++;
}
else {
- if(__DEBUG === true)
- netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + " = " + varbinds[i].value);
-
- if(varbinds[i].type === net_snmp.ObjectType.OctetString && service.snmp_oids_index[varbinds[i].oid].type !== 'title')
- value = parseFloat(varbinds[i].value) * 1000;
- else
- value = varbinds[i].value;
+ // test for Counter64
+ // varbinds[i].type = net_snmp.ObjectType.Counter64;
+ // varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
+
+ switch(varbinds[i].type) {
+ case net_snmp.ObjectType.OctetString:
+ if(service.snmp_oids_index[varbinds[i].oid].type !== 'title') {
+ // parse floating point values, exposed as strings
+ value = parseFloat(varbinds[i].value) * 1000;
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
+ }
+ else {
+ // just use the string
+ value = varbinds[i].value;
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
+ }
+ break;
+
+ case net_snmp.ObjectType.Counter64:
+ // copy the buffer
+ value = '0x' + varbinds[i].value.toString('hex');
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
+ break;
+
+ case net_snmp.ObjectType.Integer:
+ case net_snmp.ObjectType.Counter:
+ case net_snmp.ObjectType.Gauge:
+ default:
+ value = varbinds[i].value;
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
+ break;
+ }
ok++;
}
diff --git a/node.d/stiebeleltron.node.js b/node.d/stiebeleltron.node.js
new file mode 100644
index 000000000..b0eb0aba7
--- /dev/null
+++ b/node.d/stiebeleltron.node.js
@@ -0,0 +1,196 @@
+'use strict';
+
+// This program will connect to one Stiebel Eltron ISG for heat pump heating
+// to get the heat pump metrics.
+
+// example configuration in netdata/conf.d/node.d/stiebeleltron.conf.md
+
+var url = require("url");
+var http = require("http");
+var netdata = require("netdata");
+
+netdata.debug("loaded " + __filename + " plugin");
+
+var stiebeleltron = {
+ name: "Stiebel Eltron",
+ enable_autodetect: false,
+ update_every: 10,
+ base_priority: 60000,
+ charts: {},
+ pages: {},
+
+ createBasicDimension: function (id, name, multiplier, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: multiplier, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ processResponse: function (service, html) {
+ if (html === null) return;
+
+ // add the service
+ service.commit();
+
+ var page = stiebeleltron.pages[service.name];
+ var categories = page.categories;
+ var categoriesCount = categories.length;
+ while (categoriesCount--) {
+ var context = {
+ html: html,
+ service: service,
+ category: categories[categoriesCount],
+ page: page,
+ chartDefinition: null,
+ dimension: null
+ };
+ stiebeleltron.processCategory(context);
+
+ }
+ },
+
+ processCategory: function (context) {
+ var charts = context.category.charts;
+ var chartCount = charts.length;
+ while (chartCount--) {
+ context.chartDefinition = charts[chartCount];
+ stiebeleltron.processChart(context);
+ }
+ },
+
+ processChart: function (context) {
+ var dimensions = context.chartDefinition.dimensions;
+ var dimensionCount = dimensions.length;
+ context.service.begin(stiebeleltron.getChartFromContext(context));
+
+ while (dimensionCount--) {
+ context.dimension = dimensions[dimensionCount];
+ stiebeleltron.processDimension(context);
+ }
+ context.service.end();
+ },
+
+ processDimension: function (context) {
+ var dimension = context.dimension;
+ var match = new RegExp(dimension.regex).exec(context.html);
+ if (match === null) return;
+ var value = match[1].replace(",", ".");
+ // most values have a single decimal digit by default, so they are multiplied by 10; this can be overridden per dimension via "digits".
+ if (stiebeleltron.isDefined(dimension.digits)) {
+ value *= Math.pow(10, dimension.digits);
+ } else {
+ value *= 10;
+ }
+ context.service.set(stiebeleltron.getDimensionId(context), value);
+ },
+
+ getChartFromContext: function (context) {
+ var chartId = this.getChartId(context);
+ var chart = stiebeleltron.charts[chartId];
+ if (stiebeleltron.isDefined(chart)) return chart;
+
+ var chartDefinition = context.chartDefinition;
+ var service = context.service;
+ var dimensions = {};
+
+ var dimCount = chartDefinition.dimensions.length;
+ while (dimCount--) {
+ var dim = chartDefinition.dimensions[dimCount];
+ var multiplier = 1;
+ var divisor = 10;
+ if (stiebeleltron.isDefined(dim.digits)) divisor = Math.pow(10, Math.max(0, dim.digits));
+ if (stiebeleltron.isDefined(dim.multiplier)) multiplier = dim.multiplier;
+ if (stiebeleltron.isDefined(dim.divisor)) divisor = dim.divisor;
+ context.dimension = dim;
+ var dimId = this.getDimensionId(context);
+ dimensions[dimId] = this.createBasicDimension(dimId, dim.name, multiplier, divisor);
+ }
+
+ chart = {
+ id: chartId,
+ name: '',
+ title: chartDefinition.title,
+ units: chartDefinition.unit,
+ family: context.category.name,
+ context: 'stiebeleltron.' + context.page.id + "." + context.category.id,
+ type: chartDefinition.type,
+ priority: stiebeleltron.base_priority + chartDefinition.prio,// the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dimensions
+ };
+ chart = service.chart(chartId, chart);
+ stiebeleltron.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL(uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (stiebeleltron.isUndefined(config.pages)) return 0;
+ var added = 0;
+ var pageCount = config.pages.length;
+ while (pageCount--) {
+ var page = config.pages[pageCount];
+ // some validation
+ if (stiebeleltron.isUndefined(page.categories) || page.categories.length < 1) {
+ netdata.error("Your Stiebel Eltron config is invalid. Disabling plugin.");
+ return 0;
+ }
+ if (stiebeleltron.isUndefined(page.update_every)) page.update_every = this.update_every;
+ this.pages[page.name] = page;
+ this.serviceExecute(page.name, page.url, page.update_every);
+ added++;
+ }
+ return added;
+ },
+
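+ // a minimal config sketch (shape only; the names and values here are
+ // hypothetical, not taken from a shipped configuration):
+ // { "pages": [ { "name": "Heat Pump", "id": "hp", "url": "http://isg/?s=1,0",
+ // "update_every": 10, "categories": [ ... ] } ] }
+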
+ // module.update()
+ // this is called repeatedly to collect data, by
+ // executing the service and processing its response
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ getChartId: function (context) {
+ return "stiebeleltron_" + context.page.id +
+ "." + context.category.id +
+ "." + context.chartDefinition.id;
+ },
+
+ getDimensionId: function (context) {
+ return context.dimension.id;
+ },
+
+ isUndefined: function (value) {
+ return typeof value === 'undefined';
+ },
+
+ isDefined: function (value) {
+ return typeof value !== 'undefined';
+ }
+};
+
+module.exports = stiebeleltron;
diff --git a/package.json b/package.json
new file mode 100644
index 000000000..69f74bcfd
--- /dev/null
+++ b/package.json
@@ -0,0 +1,23 @@
+{
+ "devDependencies": {
+ "coffee-script": "^1.12.7",
+ "jasmine": "^2.6.0",
+ "jasmine-core": "^2.6.4",
+ "karma": "^1.7.0",
+ "karma-chrome-launcher": "^2.2.0",
+ "karma-coverage": "^1.1.1",
+ "karma-firefox-launcher": "^1.0.1",
+ "karma-jasmine": "^1.1.0",
+ "walkdir": "^0.0.11",
+ "underscore": "^1.8.3",
+ "gaze": "^1.1.2",
+ "mkdirp": "^0.5.1",
+ "minimist": "^1.2.0",
+ "jasmine-growl-reporter": "^1.0.1",
+ "xml2js": "^0.4.17",
+ "grunt": "^1.0.1",
+ "grunt-exec": "^2.0.0",
+ "jasmine-reporters": "^2.2.1",
+ "jasmine-node": "BrainDoctor/jasmine-node"
+ }
+}
diff --git a/plugins.d/alarm-notify.sh b/plugins.d/alarm-notify.sh
index 4f619091f..9b7f6c8dd 100755
--- a/plugins.d/alarm-notify.sh
+++ b/plugins.d/alarm-notify.sh
@@ -43,7 +43,7 @@ then
id=1
last="CLEAR"
- for x in "CRITICAL" "WARNING" "CLEAR"
+ for x in "WARNING" "CRITICAL" "CLEAR"
do
echo >&2
echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
@@ -99,11 +99,41 @@ fatal() {
exit 1
}
-debug=0
+debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
debug() {
- [ ${debug} -eq 1 ] && log DEBUG "${@}"
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
}
+docurl() {
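+ # wrapper around ${curl}: in debug mode it logs the exact command and
+ # the received response; in both modes it prints the HTTP status code
+ # on stdout and returns curl's exit status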
+ if [ -z "${curl}" ]
+ then
+ error "\${curl} is unset."
+ return 1
+ fi
+
+ if [ "${debug}" = "1" ]
+ then
+ echo >&2 "--- BEGIN curl command ---"
+ printf >&2 "%q " ${curl} "${@}"
+ echo >&2
+ echo >&2 "--- END curl command ---"
+
+ # assign separately from 'local' so that $? reflects curl's exit status
+ local out code ret
+ out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
+ code=$(${curl} --write-out %{http_code} --output "${out}" --silent --show-error "${@}")
+ ret=$?
+ echo >&2 "--- BEGIN received response ---"
+ cat >&2 "${out}"
+ echo >&2
+ echo >&2 "--- END received response ---"
+ echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
+ rm "${out}"
+ echo "${code}"
+ return ${ret}
+ fi
+
+ ${curl} --write-out %{http_code} --output /dev/null --silent --show-error "${@}"
+ return $?
+}
# -----------------------------------------------------------------------------
# this is to be overwritten by the config file
@@ -122,33 +152,33 @@ custom_sender() {
# -----------------------------------------------------------------------------
# defaults to allow running this script by hand
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
# -----------------------------------------------------------------------------
# parse command line parameters
-roles="${1}" # the roles that should be notified for this event
-host="${2}" # the host generated this event
-unique_id="${3}" # the unique id of this event
-alarm_id="${4}" # the unique id of the alarm that generated this event
-event_id="${5}" # the incremental id of the event, for this alarm id
-when="${6}" # the timestamp this event occurred
-name="${7}" # the name of the alarm, as given in netdata health.d entries
-chart="${8}" # the name of the chart (type.id)
-family="${9}" # the family of the chart
-status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
-old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
-value="${12}" # the current value of the alarm
-old_value="${13}" # the previous value of the alarm
-src="${14}" # the line number and file the alarm has been configured
-duration="${15}" # the duration in seconds of the previous alarm state
+roles="${1}" # the roles that should be notified for this event
+host="${2}" # the host generated this event
+unique_id="${3}" # the unique id of this event
+alarm_id="${4}" # the unique id of the alarm that generated this event
+event_id="${5}" # the incremental id of the event, for this alarm id
+when="${6}" # the timestamp this event occurred
+name="${7}" # the name of the alarm, as given in netdata health.d entries
+chart="${8}" # the name of the chart (type.id)
+family="${9}" # the family of the chart
+status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+value="${12}" # the current value of the alarm
+old_value="${13}" # the previous value of the alarm
+src="${14}" # the line number and file the alarm has been configured
+duration="${15}" # the duration in seconds of the previous alarm state
non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
-units="${17}" # the units of the value
-info="${18}" # a short description of the alarm
-value_string="${19}" # friendly value (with units)
-old_value_string="${20}" # friendly old value (with units)
+units="${17}" # the units of the value
+info="${18}" # a short description of the alarm
+value_string="${19}" # friendly value (with units)
+old_value_string="${20}" # friendly old value (with units)
# -----------------------------------------------------------------------------
# find a suitable hostname to use, if netdata did not supply a hostname
@@ -251,6 +281,7 @@ KAFKA_SENDER_IP=
# pagerduty.com configs
PD_SERVICE_KEY=
+DEFAULT_RECIPIENT_PD=
declare -A role_recipients_pd=()
# custom configs
@@ -258,7 +289,9 @@ DEFAULT_RECIPIENT_CUSTOM=
declare -A role_recipients_custom=()
# email configs
+EMAIL_SENDER=
DEFAULT_RECIPIENT_EMAIL="root"
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
declare -A role_recipients_email=()
# load the user configuration
@@ -266,6 +299,16 @@ declare -A role_recipients_email=()
if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ]
then
source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf"
+else
+ error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults."
+fi
+
+# If we didn't autodetect the character set for e-mail and it wasn't
+# set by the user, we need to set it to a reasonable default. UTF-8
+# should be correct for almost all modern UNIX systems.
+if [ -z "${EMAIL_CHARSET}" ]
+ then
+ EMAIL_CHARSET="UTF-8"
fi
# -----------------------------------------------------------------------------
@@ -283,28 +326,45 @@ filter_recipient_by_criticality() {
# the severity is invalid
s="${s^^}"
- [ "${s}" != "CRITICAL" ] && return 0
-
- # the new or the old status matches the severity
- if [ "${s}" = "${status}" -o "${s}" = "${old_status}" ]
- then
- [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \
- mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
-
- # we need to keep track of the notifications we sent
- # so that the same user will receive the recovery
- # even if old_status does not match the required severity
- touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ if [ "${s}" != "CRITICAL" ]
+ then
+ error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported."
return 0
fi
- # it is a cleared alarm we have sent notification for
- if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]
- then
- rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
- return 0
- fi
+ # create the status tracking directory for this user
+ [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \
+ mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
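+
+ # a recipient filtered as 'critical' gets the CRITICAL notification and
+ # then every status change of that alarm until it clears; the tracking
+ # file created above records that state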
+ case "${status}" in
+ CRITICAL)
+ # make sure the recipient will get future notifications for this alarm too
+ touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
+ return 0
+ ;;
+
+ WARNING)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]
+ then
+ # we do not remove the file, so that the recipient will get future notifications of this alarm
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
+ return 0
+ fi
+ ;;
+
+ *)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]
+ then
+ # remove the file, so that the recipient will only receive notifications for CRITICAL states for this alarm
+ rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)"
+ return 0
+ fi
+ ;;
+ esac
+
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification"
return 1
}
@@ -321,6 +381,7 @@ declare -A arr_telegram=()
declare -A arr_pd=()
declare -A arr_email=()
declare -A arr_custom=()
+declare -A arr_messagebird=()
# netdata may call us with multiple roles, and roles may have multiple but
# overlapping recipients - so, here we find the unique recipients.
@@ -508,8 +569,7 @@ if [ "${SEND_PD}" = "YES" ]
pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)"
if [ -z "${pd_send}" ]
then
- # no pd-send available
- # disable pagerduty.com
+ error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications."
SEND_PD="NO"
fi
fi
@@ -530,8 +590,7 @@ if [ \( \
curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
if [ -z "${curl}" ]
then
- # no curl available
- # disable all curl based methods
+ error "Cannot find curl command in the system path. Disabling all curl based notifications."
SEND_PUSHOVER="NO"
SEND_PUSHBULLET="NO"
SEND_TELEGRAM="NO"
@@ -548,7 +607,11 @@ fi
if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
then
sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)"
- [ -z "${sendmail}" ] && SEND_EMAIL="NO"
+ if [ -z "${sendmail}" ]
+ then
+ debug "Cannot find sendmail command in the system path. Disabling email notifications."
+ SEND_EMAIL="NO"
+ fi
fi
# check that we have at least a method enabled
@@ -659,11 +722,39 @@ duration4human() {
# email sender
send_email() {
- local ret=
+ local ret= opts=
if [ "${SEND_EMAIL}" = "YES" ]
then
- "${sendmail}" -t
+ if [ ! -z "${EMAIL_SENDER}" ]
+ then
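+ # derive sendmail's -f (envelope sender) and -F (full name) from
+ # EMAIL_SENDER, which may be given as 'Name <address>' with the name
+ # optionally quoted, or as a bare address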
+ if [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]]
+ then
+ # the name is enclosed in double quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ elif [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]]
+ then
+ # the name is enclosed in single quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]]
+ then
+ # the name does not have any quotes
+ opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'"
+ else
+ # no name at all
+ opts=" -f ${EMAIL_SENDER}"
+ fi
+ fi
+
+ if [ "${debug}" = "1" ]
+ then
+ echo >&2 "--- BEGIN sendmail command ---"
+ printf >&2 "%q " "${sendmail}" -t ${opts}
+ echo >&2
+ echo >&2 "--- END sendmail command ---"
+ fi
+
+ "${sendmail}" -t ${opts}
ret=$?
if [ ${ret} -eq 0 ]
@@ -699,7 +790,7 @@ send_pushover() {
for user in ${usertokens}
do
- httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl \
--form-string "token=${apptoken}" \
--form-string "user=${user}" \
--form-string "html=1" \
@@ -736,7 +827,7 @@ send_pushbullet() {
#https://docs.pushbullet.com/#create-push
for user in ${recipients}
do
- httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl \
--header 'Access-Token: '${userapikey}'' \
--header 'Content-Type: application/json' \
--data-binary @<(cat <<EOF
@@ -769,7 +860,7 @@ send_kafka() {
local httpcode sent=0
if [ "${SEND_KAFKA}" = "YES" ]
then
- httpcode=$(${curl} -X POST --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl -X POST \
--data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \
"${KAFKA_URL}")
@@ -853,7 +944,7 @@ send_twilio() {
#https://www.twilio.com/packages/labs/code/bash/twilio-sms
for user in ${recipients}
do
- httpcode=$(${curl} -X POST --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl -X POST \
--data-urlencode "From=${twilionumber}" \
--data-urlencode "To=${user}" \
--data-urlencode "Body=${title} ${message}" \
@@ -907,10 +998,10 @@ send_hipchat() {
for room in ${recipients}
do
- httpcode=$(${curl} -X POST --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl -X POST \
-H "Content-type: application/json" \
-H "Authorization: Bearer ${authtoken}" \
- -d "{\"color\": \"${color}\", \"from\": \"${netdata}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \
+ -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \
"https://${HIPCHAT_SERVER}/v2/room/${room}/notification")
if [ "${httpcode}" == "204" ]
@@ -939,7 +1030,7 @@ send_messagebird() {
#https://developers.messagebird.com/docs/messaging
for user in ${recipients}
do
- httpcode=$(${curl} -X POST --write-out %{http_code} --silent --output /dev/null \
+ httpcode=$(docurl -X POST \
--data-urlencode "originator=${messagebirdnumber}" \
--data-urlencode "recipients=${user}" \
--data-urlencode "body=${title} ${message}" \
@@ -982,7 +1073,7 @@ send_telegram() {
for chatid in ${chatids}
do
# https://core.telegram.org/bots/api#sendmessage
- httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null ${disableNotification} \
+ httpcode=$(docurl ${disableNotification} \
--data-urlencode "parse_mode=HTML" \
--data-urlencode "disable_web_page_preview=true" \
--data-urlencode "text=${emoji} ${message}" \
@@ -1047,7 +1138,7 @@ send_slack() {
}
],
"thumb_url": "${image}",
- "footer": "<${goto_url}|${host}>",
+ "footer": "by <${goto_url}|${this_host}>",
"ts": ${when}
}
]
@@ -1055,7 +1146,7 @@ send_slack() {
EOF
)"
- httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
if [ "${httpcode}" == "200" ]
then
info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
@@ -1110,7 +1201,7 @@ send_discord() {
],
"thumb_url": "${image}",
"footer_icon": "${images_base_url}/images/seo-performance-128.png",
- "footer": "${host}",
+ "footer": "${this_host}",
"ts": ${when}
}
]
@@ -1118,7 +1209,7 @@ send_discord() {
EOF
)"
- httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
if [ "${httpcode}" == "200" ]
then
info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
@@ -1363,7 +1454,9 @@ Content-Type: multipart/alternative; boundary="multipart-boundary"
This is a MIME-encoded multipart message
--multipart-boundary
-Content-Type: text/plain
+Content-Type: text/plain; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
${host} ${status_message}
@@ -1379,7 +1472,9 @@ Date : ${date}
Notification generated on ${this_host}
--multipart-boundary
-Content-Type: text/html
+Content-Type: text/html; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
diff --git a/plugins.d/charts.d.plugin b/plugins.d/charts.d.plugin
index eda5c0de4..c36a0cde3 100755
--- a/plugins.d/charts.d.plugin
+++ b/plugins.d/charts.d.plugin
@@ -25,6 +25,8 @@ MODULE_NAME="main"
debug=0
TMP_DIR=
chartsd_cleanup() {
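+ # ignore further signals while cleaning up, so the handler cannot re-enter itself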
+ trap '' EXIT QUIT HUP INT TERM
+
if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
then
[ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
@@ -32,9 +34,7 @@ chartsd_cleanup() {
fi
exit 0
}
-trap chartsd_cleanup EXIT
-trap chartsd_cleanup SIGHUP
-trap chartsd_cleanup INT
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
if [ $UID = "0" ]
then
diff --git a/plugins.d/fping.plugin b/plugins.d/fping.plugin
index b6d981a85..f38a8dde0 100755
--- a/plugins.d/fping.plugin
+++ b/plugins.d/fping.plugin
@@ -181,6 +181,7 @@ fi
options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
# execute fping
+info "starting fping: ${fping} ${options[*]}"
exec "${fping}" "${options[@]}"
# if we cannot execute fping, stop
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
index 43f25cffe..84c2aeadd 100644
--- a/python.d/Makefile.am
+++ b/python.d/Makefile.am
@@ -16,6 +16,7 @@ dist_python_DATA = \
apache.chart.py \
apache_cache.chart.py \
bind_rndc.chart.py \
+ chrony.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
dns_query_time.chart.py \
@@ -101,3 +102,68 @@ dist_pythonyaml3_DATA = \
python_modules/pyyaml3/serializer.py \
python_modules/pyyaml3/tokens.py \
$(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
+
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
index 33efd42d9..104f4f1cf 100644
--- a/python.d/Makefile.in
+++ b/python.d/Makefile.in
@@ -81,7 +81,13 @@ build_triplet = @build@
host_triplet = @host@
DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
$(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
- $(dist_python_DATA) $(dist_pythonmodules_DATA) \
+ $(dist_python_DATA) $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
subdir = python.d
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -127,6 +133,13 @@ am__uninstall_files_from_dir = { \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(python_urllib3dir)" \
+ "$(DESTDIR)$(python_urllib3_backportsdir)" \
+ "$(DESTDIR)$(python_urllib3_contribdir)" \
+ "$(DESTDIR)$(python_urllib3_packagesdir)" \
+ "$(DESTDIR)$(python_urllib3_securetransportdir)" \
+ "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
+ "$(DESTDIR)$(python_urllib3_utildir)" \
"$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
"$(DESTDIR)$(pythonyaml3dir)"
SCRIPTS = $(dist_python_SCRIPTS)
@@ -149,7 +162,13 @@ am__can_run_installinfo = \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
-DATA = $(dist_python_DATA) $(dist_pythonmodules_DATA) \
+DATA = $(dist_python_DATA) $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -312,6 +331,7 @@ dist_python_DATA = \
apache.chart.py \
apache_cache.chart.py \
bind_rndc.chart.py \
+ chrony.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
dns_query_time.chart.py \
@@ -398,6 +418,70 @@ dist_pythonyaml3_DATA = \
python_modules/pyyaml3/tokens.py \
$(NULL)
+python_urllib3dir = $(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir = $(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir = $(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir = $(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
+
all: all-am
.SUFFIXES:
@@ -489,6 +573,153 @@ uninstall-dist_pythonDATA:
@list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_backportsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_contribDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_packagesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_securetransportDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_utilDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
@$(NORMAL_INSTALL)
@list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
@@ -593,7 +824,7 @@ check-am: all-am
check: check-am
all-am: Makefile $(SCRIPTS) $(DATA)
installdirs:
- for dir in "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)"; do \
+ for dir in "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -649,6 +880,13 @@ info: info-am
info-am:
install-data-am: install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
install-dist_pythonyaml3DATA
@@ -695,6 +933,13 @@ ps: ps-am
ps-am:
uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
@@ -704,16 +949,30 @@ uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
ctags-am distclean distclean-generic distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dist_pythonDATA \
- install-dist_pythonSCRIPTS install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_pythonDATA \
- uninstall-dist_pythonSCRIPTS uninstall-dist_pythonmodulesDATA \
+ install-dist_pythonSCRIPTS install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
+ install-dist_pythonyaml3DATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
.in:
diff --git a/python.d/README.md b/python.d/README.md
index c4504a7c5..1b04ccdf3 100644
--- a/python.d/README.md
+++ b/python.d/README.md
@@ -186,6 +186,38 @@ If no configuration is given, module will attempt to read named.stats file at `
---
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the `netdata` user can execute `chronyc tracking`. If necessary, allow it with the `cmdallow` directive in `/etc/chrony.conf`.
+
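+A quick way to verify this (a sketch; adjust to your environment):
+
+```sh
+sudo -u netdata chronyc -n tracking
+```
+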
+### Configuration
+
+Sample:
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
+
+---
+
# cpufreq
This module shows the current CPU frequency as set by the cpufreq kernel
diff --git a/python.d/chrony.chart.py b/python.d/chrony.chart.py
new file mode 100644
index 000000000..96d7e696e
--- /dev/null
+++ b/python.d/chrony.chart.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Description: chrony netdata python.d module
+# Author: Dominik Schloesser (domschl)
+
+from base import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 10
+priority = 60000
+retries = 10
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+
+CHARTS = {
+ # id: {
+ # 'options': [name, title, units, family, context, charttype],
+ # 'lines': [
+ # [unique_dimension_name, name, algorithm, multiplier, divisor]
+ # ]}
+ 'system': {
+ 'options': [None, "Chrony System Time Deltas", "microseconds", 'system', 'chrony.system', 'area'],
+ 'lines': [
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]},
+ 'offsets': {
+ 'options': [None, "Chrony System Time Offsets", "microseconds", 'system', 'chrony.offsets', 'area'],
+ 'lines': [
+ ['lastoffset', 'last offset', 'absolute', 1, 1000],
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]},
+ 'stratum': {
+ 'options': [None, "Chrony Stratum", "stratum", 'root', 'chrony.stratum', 'line'],
+ 'lines': [
+ ['stratum', None, 'absolute', 1, 1]
+ ]},
+ 'root': {
+ 'options': [None, "Chrony Root Delays", "milliseconds", 'root', 'chrony.root', 'line'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
+ ]},
+ 'frequency': {
+ 'options': [None, "Chrony Frequency", "ppm", 'frequencies', 'chrony.frequency', 'area'],
+ 'lines': [
+ ['frequency', None, 'absolute', 1, 1000]
+ ]},
+ 'residualfreq': {
+ 'options': [None, "Chrony Residual frequency", "ppm", 'frequencies', 'chrony.residualfreq', 'area'],
+ 'lines': [
+ ['residualfreq', 'residual frequency', 'absolute', 1, 1000]
+ ]},
+ 'skew': {
+ 'options': [None, "Chrony Skew, error bound on frequency", "ppm", 'frequencies', 'chrony.skew', 'area'],
+ 'lines': [
+ ['skew', None, 'absolute', 1, 1000]
+ ]}
+}
+
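+# map `chronyc tracking` field names to dimension ids and the factor that
+# converts the reported float into the integer value netdata stores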
+CHRONY = [('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)]
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.command = "chronyc -n tracking"
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
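+ # `chronyc tracking` prints 'Field name : value [unit]' lines; keep
+ # only the first token of each value (the number), keyed by field name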
+ raw_data = (line.split(':', 1) for line in raw_data)
+ parsed, data = dict(), dict()
+
+ for line in raw_data:
+ try:
+ key, value = (l.strip() for l in line)
+ except ValueError:
+ continue
+ if value:
+ parsed[key] = value.split()[0]
+
+ for key, dim_id, multiplier in CHRONY:
+ try:
+ data[dim_id] = int(float(parsed[key]) * multiplier)
+ except (KeyError, ValueError):
+ continue
+
+ return data or None
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
index d5544b7ba..01cc22b02 100644
--- a/python.d/cpufreq.chart.py
+++ b/python.d/cpufreq.chart.py
@@ -38,20 +38,31 @@ class Service(SimpleService):
data = {}
if self.accurate_exists:
- elapsed = time.time() - self.timetable['last']
-
accurate_ok = True
for name, paths in self.assignment.items():
last = self.accurate_last[name]
- current = 0
+
+ current = {}
+ deltas = {}
+ ticks_since_last = 0
+
for line in open(paths['accurate'], 'r'):
line = list(map(int, line.split()))
- current += (line[0] * line[1]) / 100
- delta = current - last
- data[name] = delta
+ current[line[0]] = line[1]
+ ticks = line[1] - last.get(line[0], 0)
+ ticks_since_last += ticks
+ deltas[line[0]] = line[1] - last.get(line[0], 0)
+
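+ # time_in_state gives ticks spent at each frequency; the average
+ # frequency over the interval is the tick-weighted mean of the deltas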
+ avg_freq = 0
+ if ticks_since_last != 0:
+ for frequency, ticks in deltas.items():
+ avg_freq += frequency * ticks
+ avg_freq /= ticks_since_last
+
+ data[name] = avg_freq
self.accurate_last[name] = current
- if delta == 0 or abs(delta) > 1e7:
+ if avg_freq == 0 or ticks_since_last == 0:
# No usable delta since the last read, fall back to
# less accurate reading. This can happen if we switch
# to/from the 'schedutil' governor, which doesn't report
@@ -60,9 +71,6 @@ class Service(SimpleService):
if accurate_ok:
return data
- else:
- self.alert("accurate method failed, falling back")
- self.accurate_exists = False
for name, paths in self.assignment.items():
@@ -84,7 +92,7 @@ class Service(SimpleService):
if cpu not in self.assignment:
self.assignment[cpu] = {}
self.assignment[cpu]['accurate'] = path
- self.accurate_last[cpu] = 0
+ self.accurate_last[cpu] = {}
if len(self.assignment) == 0:
self.accurate_exists = False
diff --git a/python.d/cpuidle.chart.py b/python.d/cpuidle.chart.py
index f7199aebd..e5ed49bd2 100644
--- a/python.d/cpuidle.chart.py
+++ b/python.d/cpuidle.chart.py
@@ -26,6 +26,7 @@ class Service(SimpleService):
self.definitions = {}
self._orig_name = ""
self.assignment = {}
+ self.last_schedstat = None
def __gettid(self):
# This is horrendous. We need the *thread id* (not the *process id*),
@@ -42,13 +43,13 @@ class Service(SimpleService):
tid = syscall(syscalls[platform.machine()])
return tid
- def __wake_cpus(self):
+ def __wake_cpus(self, cpus):
# Requires Python 3.3+. This will "tickle" each CPU to force it to
# update its idle counters.
if hasattr(os, 'sched_setaffinity'):
pid = self.__gettid()
save_affinity = os.sched_getaffinity(pid)
- for idx in range(0, len(self.assignment)):
+ for idx in cpus:
os.sched_setaffinity(pid, [idx])
os.sched_getaffinity(pid)
os.sched_setaffinity(pid, save_affinity)
@@ -67,14 +68,31 @@ class Service(SimpleService):
def _get_data(self):
results = {}
- # This line is critical for the stats to update. If we don't "tickle"
- # all the CPUs, then all the counters stop counting.
- self.__wake_cpus()
-
# Use the kernel scheduler stats to determine how much time was spent
# in C0 (active).
schedstat = self.__read_schedstat()
+ # Determine if any of the CPUs are idle. If they are, then we need to
+ # tickle them in order to update their C-state residency statistics.
+ if self.last_schedstat is None:
+ needs_tickle = list(self.assignment.keys())
+ else:
+ needs_tickle = []
+ for cpu, active_time in self.last_schedstat.items():
+ delta = schedstat[cpu] - active_time
+ if delta < 1:
+ needs_tickle.append(cpu)
+
+ if needs_tickle:
+ # This line is critical for the stats to update. If we don't "tickle"
+ # idle CPUs, then the counters for those CPUs stop counting.
+ self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle])
+
+ # Re-read schedstat now that we've tickled any idlers.
+ schedstat = self.__read_schedstat()
+
+ self.last_schedstat = schedstat
+
for cpu, metrics in self.assignment.items():
update_time = schedstat[cpu]
results[cpu + '_active_time'] = update_time
diff --git a/python.d/dovecot.chart.py b/python.d/dovecot.chart.py
index 60e8bf6ef..b2bef4956 100644
--- a/python.d/dovecot.chart.py
+++ b/python.d/dovecot.chart.py
@@ -42,8 +42,8 @@ CHARTS = {
'context_switches': {
'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
- ['vol_cs', 'volountary', 'absolute'],
- ['invol_cs', 'involountary', 'absolute']
+ ['vol_cs', 'voluntary', 'absolute'],
+ ['invol_cs', 'involuntary', 'absolute']
]},
'io': {
'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
index 9ec08719c..2e0f18c0f 100644
--- a/python.d/elasticsearch.chart.py
+++ b/python.d/elasticsearch.chart.py
@@ -2,15 +2,16 @@
# Description: elastic search node stats netdata python.d module
# Author: l2isbad
-from base import UrlService
+from collections import namedtuple
+from json import loads
from socket import gethostbyname, gaierror
+from threading import Thread
try:
from queue import Queue
except ImportError:
from Queue import Queue
-from threading import Thread
-from collections import namedtuple
-from json import loads
+
+from base import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -18,214 +19,216 @@ update_every = 5
priority = 60000
retries = 60
-METHODS = namedtuple('METHODS', ['get_data_function', 'url'])
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
NODE_STATS = [
- ('indices.search.fetch_current', None, None),
- ('indices.search.fetch_total', None, None),
- ('indices.search.query_current', None, None),
- ('indices.search.query_total', None, None),
- ('indices.search.query_time_in_millis', None, None),
- ('indices.search.fetch_time_in_millis', None, None),
- ('indices.indexing.index_total', 'indexing_index_total', None),
- ('indices.indexing.index_current', 'indexing_index_current', None),
- ('indices.indexing.index_time_in_millis', 'indexing_index_time_in_millis', None),
- ('indices.refresh.total', 'refresh_total', None),
- ('indices.refresh.total_time_in_millis', 'refresh_total_time_in_millis', None),
- ('indices.flush.total', 'flush_total', None),
- ('indices.flush.total_time_in_millis', 'flush_total_time_in_millis', None),
- ('jvm.gc.collectors.young.collection_count', 'young_collection_count', None),
- ('jvm.gc.collectors.old.collection_count', 'old_collection_count', None),
- ('jvm.gc.collectors.young.collection_time_in_millis', 'young_collection_time_in_millis', None),
- ('jvm.gc.collectors.old.collection_time_in_millis', 'old_collection_time_in_millis', None),
- ('jvm.mem.heap_used_percent', 'jvm_heap_percent', None),
- ('jvm.mem.heap_committed_in_bytes', 'jvm_heap_commit', None),
- ('thread_pool.bulk.queue', 'bulk_queue', None),
- ('thread_pool.bulk.rejected', 'bulk_rejected', None),
- ('thread_pool.index.queue', 'index_queue', None),
- ('thread_pool.index.rejected', 'index_rejected', None),
- ('thread_pool.search.queue', 'search_queue', None),
- ('thread_pool.search.rejected', 'search_rejected', None),
- ('thread_pool.merge.queue', 'merge_queue', None),
- ('thread_pool.merge.rejected', 'merge_rejected', None),
- ('indices.fielddata.memory_size_in_bytes', 'index_fdata_memory', None),
- ('indices.fielddata.evictions', None, None),
- ('breakers.fielddata.tripped', None, None),
- ('http.current_open', 'http_current_open', None),
- ('transport.rx_size_in_bytes', 'transport_rx_size_in_bytes', None),
- ('transport.tx_size_in_bytes', 'transport_tx_size_in_bytes', None),
- ('process.max_file_descriptors', None, None),
- ('process.open_file_descriptors', None, None)
+ 'indices.search.fetch_current',
+ 'indices.search.fetch_total',
+ 'indices.search.query_current',
+ 'indices.search.query_total',
+ 'indices.search.query_time_in_millis',
+ 'indices.search.fetch_time_in_millis',
+ 'indices.indexing.index_total',
+ 'indices.indexing.index_current',
+ 'indices.indexing.index_time_in_millis',
+ 'indices.refresh.total',
+ 'indices.refresh.total_time_in_millis',
+ 'indices.flush.total',
+ 'indices.flush.total_time_in_millis',
+ 'jvm.gc.collectors.young.collection_count',
+ 'jvm.gc.collectors.old.collection_count',
+ 'jvm.gc.collectors.young.collection_time_in_millis',
+ 'jvm.gc.collectors.old.collection_time_in_millis',
+ 'jvm.mem.heap_used_percent',
+ 'jvm.mem.heap_committed_in_bytes',
+ 'thread_pool.bulk.queue',
+ 'thread_pool.bulk.rejected',
+ 'thread_pool.index.queue',
+ 'thread_pool.index.rejected',
+ 'thread_pool.search.queue',
+ 'thread_pool.search.rejected',
+ 'thread_pool.merge.queue',
+ 'thread_pool.merge.rejected',
+ 'indices.fielddata.memory_size_in_bytes',
+ 'indices.fielddata.evictions',
+ 'breakers.fielddata.tripped',
+ 'http.current_open',
+ 'transport.rx_size_in_bytes',
+ 'transport.tx_size_in_bytes',
+ 'process.max_file_descriptors',
+ 'process.open_file_descriptors'
]
CLUSTER_STATS = [
- ('nodes.count.data_only', 'count_data_only', None),
- ('nodes.count.master_data', 'count_master_data', None),
- ('nodes.count.total', 'count_total', None),
- ('nodes.count.master_only', 'count_master_only', None),
- ('nodes.count.client', 'count_client', None),
- ('indices.docs.count', 'docs_count', None),
- ('indices.query_cache.hit_count', 'query_cache_hit_count', None),
- ('indices.query_cache.miss_count', 'query_cache_miss_count', None),
- ('indices.store.size_in_bytes', 'store_size_in_bytes', None),
- ('indices.count', 'indices_count', None),
- ('indices.shards.total', 'shards_total', None)
+ 'nodes.count.data_only',
+ 'nodes.count.master_data',
+ 'nodes.count.total',
+ 'nodes.count.master_only',
+ 'nodes.count.client',
+ 'indices.docs.count',
+ 'indices.query_cache.hit_count',
+ 'indices.query_cache.miss_count',
+ 'indices.store.size_in_bytes',
+ 'indices.count',
+ 'indices.shards.total'
]
HEALTH_STATS = [
- ('number_of_nodes', 'health_number_of_nodes', None),
- ('number_of_data_nodes', 'health_number_of_data_nodes', None),
- ('number_of_pending_tasks', 'health_number_of_pending_tasks', None),
- ('number_of_in_flight_fetch', 'health_number_of_in_flight_fetch', None),
- ('active_shards', 'health_active_shards', None),
- ('relocating_shards', 'health_relocating_shards', None),
- ('unassigned_shards', 'health_unassigned_shards', None),
- ('delayed_unassigned_shards', 'health_delayed_unassigned_shards', None),
- ('initializing_shards', 'health_initializing_shards', None),
- ('active_shards_percent_as_number', 'health_active_shards_percent_as_number', None)
+ 'number_of_nodes',
+ 'number_of_data_nodes',
+ 'number_of_pending_tasks',
+ 'number_of_in_flight_fetch',
+ 'active_shards',
+ 'relocating_shards',
+ 'unassigned_shards',
+ 'delayed_unassigned_shards',
+ 'initializing_shards',
+ 'active_shards_percent_as_number'
]
LATENCY = {
'query_latency':
- {'total': 'query_total',
- 'spent_time': 'query_time_in_millis'},
+ {'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'},
'fetch_latency':
- {'total': 'fetch_total',
- 'spent_time': 'fetch_time_in_millis'},
+ {'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'},
'indexing_latency':
- {'total': 'indexing_index_total',
- 'spent_time': 'indexing_index_time_in_millis'},
+ {'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'},
'flushing_latency':
- {'total': 'flush_total',
- 'spent_time': 'flush_total_time_in_millis'}
+ {'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'}
}
# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search_latency', 'index_perf_total',
- 'index_perf_current', 'index_perf_time', 'index_latency', 'jvm_mem_heap', 'jvm_gc_count',
- 'jvm_gc_time', 'host_metrics_file_descriptors', 'host_metrics_http', 'host_metrics_transport',
- 'thread_pool_qr_q', 'thread_pool_qr_r', 'fdata_cache', 'fdata_ev_tr', 'cluster_health_status',
- 'cluster_health_nodes', 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache',
- 'cluster_stats_docs', 'cluster_stats_store', 'cluster_stats_indices_shards']
+ORDER = ['search_performance_total', 'search_performance_current', 'search_performance_time',
+ 'search_latency', 'index_performance_total', 'index_performance_current', 'index_performance_time',
+ 'index_latency', 'jvm_mem_heap', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
+ 'host_metrics_http', 'host_metrics_transport', 'thread_pool_queued', 'thread_pool_rejected',
+ 'fielddata_cache', 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes',
+ 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs',
+ 'cluster_stats_store', 'cluster_stats_indices_shards']
CHARTS = {
- 'search_perf_total': {
+ 'search_performance_total': {
'options': [None, 'Queries And Fetches', 'number of', 'search performance',
- 'es.search_query_total', 'stacked'],
+ 'elastic.search_performance_total', 'stacked'],
'lines': [
- ['query_total', 'queries', 'incremental'],
- ['fetch_total', 'fetches', 'incremental']
+ ['indices_search_query_total', 'queries', 'incremental'],
+ ['indices_search_fetch_total', 'fetches', 'incremental']
]},
- 'search_perf_current': {
+ 'search_performance_current': {
'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
- 'es.search_query_current', 'stacked'],
+ 'elastic.search_performance_current', 'stacked'],
'lines': [
- ['query_current', 'queries', 'absolute'],
- ['fetch_current', 'fetches', 'absolute']
+ ['indices_search_query_current', 'queries', 'absolute'],
+ ['indices_search_fetch_current', 'fetches', 'absolute']
]},
- 'search_perf_time': {
+ 'search_performance_time': {
'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
- 'es.search_time', 'stacked'],
+ 'elastic.search_performance_time', 'stacked'],
'lines': [
- ['query_time_in_millis', 'query', 'incremental', 1, 1000],
- ['fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
+ ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
+ ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
]},
'search_latency': {
- 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
+ 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]},
- 'index_perf_total': {
+ 'index_performance_total': {
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
- 'indexing performance', 'es.index_performance_total', 'stacked'],
+ 'indexing performance', 'elastic.index_performance_total', 'stacked'],
'lines': [
- ['indexing_index_total', 'indexed', 'incremental'],
- ['refresh_total', 'refreshes', 'incremental'],
- ['flush_total', 'flushes', 'incremental']
+ ['indices_indexing_index_total', 'indexed', 'incremental'],
+ ['indices_refresh_total', 'refreshes', 'incremental'],
+ ['indices_flush_total', 'flushes', 'incremental']
]},
- 'index_perf_current': {
+ 'index_performance_current': {
'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
- 'indexing performance', 'es.index_performance_current', 'stacked'],
+ 'indexing performance', 'elastic.index_performance_current', 'stacked'],
'lines': [
- ['indexing_index_current', 'documents', 'absolute']
+ ['indices_indexing_index_current', 'documents', 'absolute']
]},
- 'index_perf_time': {
+ 'index_performance_time': {
'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
- 'es.search_time', 'stacked'],
+ 'elastic.index_performance_time', 'stacked'],
'lines': [
- ['indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
- ['refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
- ['flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+ ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+ ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+ ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
]},
'index_latency': {
'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
- 'es.index_latency', 'stacked'],
+ 'elastic.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
]},
'jvm_mem_heap': {
'options': [None, 'JVM Heap Currently in Use/Committed', 'percent/MB', 'memory usage and gc',
- 'es.jvm_heap', 'area'],
+ 'elastic.jvm_heap', 'area'],
'lines': [
- ['jvm_heap_percent', 'inuse', 'absolute'],
- ['jvm_heap_commit', 'commit', 'absolute', -1, 1048576]
+ ['jvm_mem_heap_used_percent', 'inuse', 'absolute'],
+ ['jvm_mem_heap_committed_in_bytes', 'commit', 'absolute', -1, 1048576]
]},
'jvm_gc_count': {
- 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
+ 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [
- ['young_collection_count', 'young', 'incremental'],
- ['old_collection_count', 'old', 'incremental']
+ ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
]},
'jvm_gc_time': {
- 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
+ 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
+ 'elastic.gc_time', 'stacked'],
'lines': [
- ['young_collection_time_in_millis', 'young', 'incremental'],
- ['old_collection_time_in_millis', 'old', 'incremental']
+ ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
]},
- 'thread_pool_qr_q': {
+ 'thread_pool_queued': {
'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
- 'es.thread_pool_queued', 'stacked'],
+ 'elastic.thread_pool_queued', 'stacked'],
'lines': [
- ['bulk_queue', 'bulk', 'absolute'],
- ['index_queue', 'index', 'absolute'],
- ['search_queue', 'search', 'absolute'],
- ['merge_queue', 'merge', 'absolute']
+ ['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_index_queue', 'index', 'absolute'],
+ ['thread_pool_search_queue', 'search', 'absolute'],
+ ['thread_pool_merge_queue', 'merge', 'absolute']
]},
- 'thread_pool_qr_r': {
+ 'thread_pool_rejected': {
'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
- 'es.thread_pool_rejected', 'stacked'],
+ 'elastic.thread_pool_rejected', 'stacked'],
'lines': [
- ['bulk_rejected', 'bulk', 'absolute'],
- ['index_rejected', 'index', 'absolute'],
- ['search_rejected', 'search', 'absolute'],
- ['merge_rejected', 'merge', 'absolute']
+ ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_index_rejected', 'index', 'absolute'],
+ ['thread_pool_search_rejected', 'search', 'absolute'],
+ ['thread_pool_merge_rejected', 'merge', 'absolute']
]},
- 'fdata_cache': {
- 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
+ 'fielddata_cache': {
+ 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [
- ['index_fdata_memory', 'cache', 'absolute', 1, 1048576]
+ ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
]},
- 'fdata_ev_tr': {
+ 'fielddata_evictions_tripped': {
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
- 'fielddata cache', 'es.evictions_tripped', 'line'],
+ 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [
- ['evictions', None, 'incremental'],
- ['tripped', None, 'incremental']
+ ['indices_fielddata_evictions', 'evictions', 'incremental'],
+ ['indices_fielddata_tripped', 'tripped', 'incremental']
]},
'cluster_health_nodes': {
'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
- 'es.cluster_health_nodes', 'stacked'],
+ 'elastic.cluster_health_nodes', 'stacked'],
'lines': [
- ['health_number_of_nodes', 'nodes', 'absolute'],
- ['health_number_of_data_nodes', 'data_nodes', 'absolute'],
- ['health_number_of_pending_tasks', 'pending_tasks', 'absolute'],
- ['health_number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
+ ['number_of_nodes', 'nodes', 'absolute'],
+ ['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]},
'cluster_health_status': {
'options': [None, 'Cluster Status', 'status', 'cluster health API',
- 'es.cluster_health_status', 'area'],
+ 'elastic.cluster_health_status', 'area'],
'lines': [
['status_green', 'green', 'absolute'],
['status_red', 'red', 'absolute'],
@@ -236,67 +239,67 @@ CHARTS = {
]},
'cluster_health_shards': {
'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
- 'es.cluster_health_shards', 'stacked'],
+ 'elastic.cluster_health_shards', 'stacked'],
'lines': [
- ['health_active_shards', 'active_shards', 'absolute'],
- ['health_relocating_shards', 'relocating_shards', 'absolute'],
- ['health_unassigned_shards', 'unassigned', 'absolute'],
- ['health_delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
- ['health_initializing_shards', 'initializing', 'absolute'],
- ['health_active_shards_percent_as_number', 'active_percent', 'absolute']
+ ['active_shards', 'active_shards', 'absolute'],
+ ['relocating_shards', 'relocating_shards', 'absolute'],
+ ['unassigned_shards', 'unassigned', 'absolute'],
+ ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+ ['initializing_shards', 'initializing', 'absolute'],
+ ['active_shards_percent_as_number', 'active_percent', 'absolute']
]},
'cluster_stats_nodes': {
'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
- 'es.cluster_nodes', 'stacked'],
+ 'elastic.cluster_nodes', 'stacked'],
'lines': [
- ['count_data_only', 'data_only', 'absolute'],
- ['count_master_data', 'master_data', 'absolute'],
- ['count_total', 'total', 'absolute'],
- ['count_master_only', 'master_only', 'absolute'],
- ['count_client', 'client', 'absolute']
+ ['nodes_count_data_only', 'data_only', 'absolute'],
+ ['nodes_count_master_data', 'master_data', 'absolute'],
+ ['nodes_count_total', 'total', 'absolute'],
+ ['nodes_count_master_only', 'master_only', 'absolute'],
+ ['nodes_count_client', 'client', 'absolute']
]},
'cluster_stats_query_cache': {
'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
- 'es.cluster_query_cache', 'stacked'],
+ 'elastic.cluster_query_cache', 'stacked'],
'lines': [
- ['query_cache_hit_count', 'hit', 'incremental'],
- ['query_cache_miss_count', 'miss', 'incremental']
+ ['indices_query_cache_hit_count', 'hit', 'incremental'],
+ ['indices_query_cache_miss_count', 'miss', 'incremental']
]},
'cluster_stats_docs': {
'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
- 'es.cluster_docs', 'line'],
+ 'elastic.cluster_docs', 'line'],
'lines': [
- ['docs_count', 'docs', 'absolute']
+ ['indices_docs_count', 'docs', 'absolute']
]},
'cluster_stats_store': {
'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
- 'es.cluster_store', 'line'],
+ 'elastic.cluster_store', 'line'],
'lines': [
- ['store_size_in_bytes', 'size', 'absolute', 1, 1048567]
+ ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
]},
'cluster_stats_indices_shards': {
'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
- 'es.cluster_indices_shards', 'stacked'],
+ 'elastic.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
- ['shards_total', 'shards', 'absolute']
+ ['indices_shards_total', 'shards', 'absolute']
]},
'host_metrics_transport': {
'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
- 'es.host_transport', 'area'],
+ 'elastic.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
]},
'host_metrics_file_descriptors': {
'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
- 'es.host_descriptors', 'area'],
+ 'elastic.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
]},
'host_metrics_http': {
'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
- 'es.host_http_connections', 'line'],
+ 'elastic.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
]}
@@ -310,7 +313,9 @@ class Service(UrlService):
self.definitions = CHARTS
self.host = self.configuration.get('host')
self.port = self.configuration.get('port', 9200)
- self.scheme = self.configuration.get('scheme', 'http')
+ self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port)
self.latency = dict()
self.methods = list()
@@ -329,33 +334,19 @@ class Service(UrlService):
self.error(str(error))
return False
- scheme = 'http' if self.scheme == 'http' else 'https'
- # Add handlers (auth, self signed cert accept)
- self.url = '%s://%s:%s' % (scheme, self.host, self.port)
- self.opener = self._build_opener()
# Create URL for every Elasticsearch API
- url_node_stats = '%s://%s:%s/_nodes/_local/stats' % (scheme, self.host, self.port)
- url_cluster_health = '%s://%s:%s/_cluster/health' % (scheme, self.host, self.port)
- url_cluster_stats = '%s://%s:%s/_cluster/stats' % (scheme, self.host, self.port)
-
- user_choice = [bool(self.configuration.get('node_stats', True)),
- bool(self.configuration.get('cluster_health', True)),
- bool(self.configuration.get('cluster_stats', True))]
-
- avail_methods = [METHODS(get_data_function=self._get_node_stats_,
- url=url_node_stats),
- METHODS(get_data_function=self._get_cluster_health_,
- url=url_cluster_health),
- METHODS(get_data_function=self._get_cluster_stats_,
- url=url_cluster_stats)]
+ self.methods = [METHODS(get_data=self._get_node_stats,
+ url=self.url + '/_nodes/_local/stats',
+ run=self.configuration.get('node_stats', True)),
+ METHODS(get_data=self._get_cluster_health,
+ url=self.url + '/_cluster/health',
+ run=self.configuration.get('cluster_health', True)),
+ METHODS(get_data=self._get_cluster_stats,
+ url=self.url + '/_cluster/stats',
+ run=self.configuration.get('cluster_stats', True))]
# Disabled API calls are skipped at collection time (see _get_data)
- self.methods = [avail_methods[e[0]] for e in enumerate(avail_methods) if user_choice[e[0]]]
- data = self._get_data()
- if not data:
- return False
- self._data_from_check = data
- return True
+ return UrlService.check(self)
def _get_data(self):
threads = list()
@@ -363,7 +354,9 @@ class Service(UrlService):
result = dict()
for method in self.methods:
- th = Thread(target=method.get_data_function,
+ if not method.run:
+ continue
+ th = Thread(target=method.get_data,
args=(queue, method.url))
th.start()
threads.append(th)
@@ -374,7 +367,7 @@ class Service(UrlService):
return result or None
- def _get_cluster_health_(self, queue, url):
+ def _get_cluster_health(self, queue, url):
"""
Format data received from http request
:return: dict
@@ -387,7 +380,7 @@ class Service(UrlService):
data = loads(raw_data)
to_netdata = fetch_data_(raw_data=data,
- metrics_list=HEALTH_STATS)
+ metrics=HEALTH_STATS)
to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
@@ -396,7 +389,7 @@ class Service(UrlService):
return queue.put(to_netdata)
- def _get_cluster_stats_(self, queue, url):
+ def _get_cluster_stats(self, queue, url):
"""
Format data received from http request
:return: dict
@@ -409,11 +402,11 @@ class Service(UrlService):
data = loads(raw_data)
to_netdata = fetch_data_(raw_data=data,
- metrics_list=CLUSTER_STATS)
+ metrics=CLUSTER_STATS)
return queue.put(to_netdata)
- def _get_node_stats_(self, queue, url):
+ def _get_node_stats(self, queue, url):
"""
Format data received from http request
:return: dict
@@ -428,23 +421,23 @@ class Service(UrlService):
node = list(data['nodes'].keys())[0]
to_netdata = fetch_data_(raw_data=data['nodes'][node],
- metrics_list=NODE_STATS)
+ metrics=NODE_STATS)
# Search, index, flush, fetch performance latency
for key in LATENCY:
try:
- to_netdata[key] = self.find_avg_(total=to_netdata[LATENCY[key]['total']],
- spent_time=to_netdata[LATENCY[key]['spent_time']],
- key=key)
+ to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']],
+ spent_time=to_netdata[LATENCY[key]['spent_time']],
+ key=key)
except KeyError:
continue
- if 'open_file_descriptors' in to_netdata and 'max_file_descriptors' in to_netdata:
- to_netdata['file_descriptors_used'] = round(float(to_netdata['open_file_descriptors'])
- / to_netdata['max_file_descriptors'] * 1000)
+ if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata:
+ to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors'])
+ / to_netdata['process_max_file_descriptors'] * 1000)
return queue.put(to_netdata)
- def find_avg_(self, total, spent_time, key):
+ def find_avg(self, total, spent_time, key):
if key not in self.latency:
self.latency[key] = dict(total=total,
spent_time=spent_time)
@@ -459,17 +452,15 @@ class Service(UrlService):
return 0
-def fetch_data_(raw_data, metrics_list):
- to_netdata = dict()
- for metric, new_name, func in metrics_list:
+def fetch_data_(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
value = raw_data
- for key in metric.split('.'):
- try:
- value = value[key]
- except KeyError:
- break
- if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not func else func(value)
-
- return to_netdata
-
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
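
For reference, the reworked fetch_data_ above walks each dotted metric path through the nested JSON response and flattens the path into an underscore-joined key, silently skipping metrics absent from the response. A minimal standalone sketch of that behaviour, using a hypothetical response fragment:

def flatten(raw_data, metrics):
    data = dict()
    for metric in metrics:
        value = raw_data
        metrics_list = metric.split('.')
        try:
            for m in metrics_list:
                value = value[m]
        except KeyError:
            continue
        data['_'.join(metrics_list)] = value
    return data

sample = {'indices': {'search': {'query_total': 42}}}
print(flatten(sample, ['indices.search.query_total', 'indices.search.missing']))
# {'indices_search_query_total': 42}

Likewise, _get_data now fans out one thread per API endpoint and merges results through a shared queue, skipping endpoints whose run flag is disabled instead of filtering them out of the list up front. A sketch of that pattern (the fetch function below is a stand-in for the real HTTP parsers):

from collections import namedtuple
from threading import Thread
try:
    from queue import Queue          # Python 3
except ImportError:
    from Queue import Queue          # Python 2

METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])

def fetch(queue, url):
    queue.put({url: 'ok'})           # a real method parses the HTTP response here

methods = [METHODS(get_data=fetch, url='/_cluster/health', run=True),
           METHODS(get_data=fetch, url='/_cluster/stats', run=False)]

queue, result, threads = Queue(), dict(), list()
for method in methods:
    if not method.run:
        continue                     # disabled API calls are skipped, not removed
    th = Thread(target=method.get_data, args=(queue, method.url))
    th.start()
    threads.append(th)
for th in threads:
    th.join()
    result.update(queue.get())
print(result)                        # {'/_cluster/health': 'ok'}
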
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
index 35761e894..5238fa16e 100644
--- a/python.d/fail2ban.chart.py
+++ b/python.d/fail2ban.chart.py
@@ -11,8 +11,8 @@ from base import LogService
priority = 60000
retries = 60
-REGEX_JAILS = r_compile(r'\[([A-Za-z-_0-9]+)][^\[\]]*?(?<!# )enabled = (?:(true|false))')
-REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>(?:(U|B)))[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
+REGEX_JAILS = r_compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>U|B)[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
ORDER = ['jails_bans', 'jails_in_jail']
@@ -98,7 +98,6 @@ class Service(LogService):
jails_list.append(jail)
elif status == 'false' and jail in jails_list:
jails_list.remove(jail)
-
# If for some reason parse failed we still can START with default jails_list.
jails_list = list(set(jails_list) - set(self.exclude.split()
if isinstance(self.exclude, str) else list())) or ['ssh']
@@ -182,8 +181,8 @@ def find_jails_in_files(list_of_files, print_error):
for conf in list_of_files:
if is_accessible(conf, R_OK):
with open(conf, 'rt') as conf:
- raw_data = conf.read()
- data = ' '.join(raw_data.split())
+ raw_data = conf.readlines()
+ data = ' '.join(line for line in raw_data if line.startswith(('[', 'enabled')))
jails_list.extend(REGEX_JAILS.findall(data))
else:
print_error('%s is not readable or does not exist' % conf)
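
The tightened REGEX_JAILS above no longer needs the negative lookbehind, because find_jails_in_files now feeds it only section headers and enabled lines. A small self-contained check against a hypothetical jail.local fragment:

from re import compile as r_compile

REGEX_JAILS = r_compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')

lines = ['[DEFAULT]\n', 'bantime = 600\n', '[sshd]\n', 'enabled = true\n']
data = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
print(REGEX_JAILS.findall(data))  # [('sshd', 'true')]
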
diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py
index 67a6f7821..a9ee66650 100644
--- a/python.d/haproxy.chart.py
+++ b/python.d/haproxy.chart.py
@@ -2,7 +2,6 @@
# Description: haproxy netdata python.d module
# Author: l2isbad
-from base import UrlService, SocketService
from collections import defaultdict
from re import compile as re_compile
@@ -11,6 +10,8 @@ try:
except ImportError:
from urllib.parse import urlparse
+from base import UrlService, SocketService
+
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
@@ -73,7 +74,8 @@ METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
'scur': {'algorithm': 'absolute', 'divisor': 1},
'qcur': {'algorithm': 'absolute', 'divisor': 1}}
-REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'), socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+ socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
class Service(UrlService, SocketService):
@@ -81,11 +83,15 @@ class Service(UrlService, SocketService):
if 'socket' in configuration:
SocketService.__init__(self, configuration=configuration, name=name)
self.poll = SocketService
- self.options_ = dict(regex=REGEX['socket'], stat='show stat\n', info='show info\n')
+ self.options_ = dict(regex=REGEX['socket'],
+ stat='show stat\n'.encode(),
+ info='show info\n'.encode())
else:
UrlService.__init__(self, configuration=configuration, name=name)
self.poll = UrlService
- self.options_ = dict(regex=REGEX['url'], stat=self.url, info=url_remove_params(self.url))
+ self.options_ = dict(regex=REGEX['url'],
+ stat=self.url,
+ info=url_remove_params(self.url))
self.order = ORDER
self.definitions = CHARTS
@@ -208,4 +214,4 @@ def server_down(server, backend_name):
def url_remove_params(url):
parsed = urlparse(url or str())
- return '%s://%s%s' % (parsed.scheme, parsed.netloc, parsed.path)
+ return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
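
For context, url_remove_params derives the module's second URL by dropping the params component of the configured stats URL (the trailing ";csv;norefresh"-style segment). A standalone sketch with a hypothetical HAProxy stats URL:

try:
    from urlparse import urlparse         # Python 2
except ImportError:
    from urllib.parse import urlparse     # Python 3

def url_remove_params(url):
    parsed = urlparse(url or str())
    return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme,
                                              netloc=parsed.netloc,
                                              path=parsed.path)

print(url_remove_params('http://127.0.0.1:7000/haproxy_stats;csv;norefresh'))
# http://127.0.0.1:7000/haproxy_stats
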
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
index ca9aba564..7ce7b1932 100644
--- a/python.d/mdstat.chart.py
+++ b/python.d/mdstat.chart.py
@@ -20,7 +20,7 @@ class Service(SimpleService):
r'(?P<total_disks>[0-9]+)/'
r'(?P<inuse_disks>[0-9])\]'),
status=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
- r'(?P<operation>[a-z]+) = '
+ r'(?P<operation>[a-z]+) =[ ]{1,2}'
r'(?P<operation_status>[0-9.]+).+finish='
r'(?P<finish>([0-9.]+))min speed='
r'(?P<speed>[0-9]+)'))
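
The change above makes the status regex tolerate either one or two spaces after the "=" in /proc/mdstat progress lines. A hedged demonstration against a hypothetical sample line (joined to a single line, as the parser sees it):

from re import compile as re_compile

status = re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
                    r'(?P<operation>[a-z]+) =[ ]{1,2}'
                    r'(?P<operation_status>[0-9.]+).+finish='
                    r'(?P<finish>([0-9.]+))min speed='
                    r'(?P<speed>[0-9]+)')

sample = (' md0 : active raid1 sdb1[1] sda1[0] [=>...........] '
          'resync =  8.5% (8192/104320) finish=1.2min speed=4096K/sec')
print(status.search(sample).groupdict())
# {'array': 'md0', 'operation': 'resync', 'operation_status': '8.5',
#  'finish': '1.2', 'speed': '4096'}
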
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
index cdabe971d..6118f79f2 100644
--- a/python.d/mysql.chart.py
+++ b/python.d/mysql.chart.py
@@ -117,7 +117,10 @@ GLOBAL_STATS = [
'Connection_errors_tcpwrap']
def slave_seconds(value):
- return value if value is not '' else -1
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return -1
def slave_running(value):
return 1 if value == 'Yes' else -1
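
The old slave_seconds used "value is not ''", an identity check that depends on string interning and mishandles None (MySQL reports Seconds_Behind_Master as NULL when replication is stopped). The rewrite coerces to int and falls back to -1; a quick sketch:

def slave_seconds(value):
    try:
        return int(value)
    except (TypeError, ValueError):
        return -1

print(slave_seconds('5'), slave_seconds(''), slave_seconds(None))  # 5 -1 -1
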
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index ef710cb84..b17565e9d 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -118,30 +118,15 @@ GROUP BY datname, mode
ORDER BY datname, mode;
""",
FIND_DATABASES="""
-SELECT datname FROM pg_stat_database WHERE NOT datname ~* '^template\d+'
+SELECT datname
+FROM pg_stat_database
+WHERE has_database_privilege((SELECT current_user), datname, 'connect')
+AND NOT datname ~* '^template\d+';
""",
IF_SUPERUSER="""
SELECT current_setting('is_superuser') = 'on' AS is_superuser;
""")
-# REPLICATION = """
-# SELECT
-# client_hostname,
-# client_addr,
-# state,
-# sent_offset - (
-# replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
-# FROM (
-# SELECT
-# client_addr, client_hostname, state,
-# ('x' || lpad(split_part(sent_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
-# ('x' || lpad(split_part(replay_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
-# ('x' || lpad(split_part(sent_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
-# ('x' || lpad(split_part(replay_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
-# FROM pg_stat_replication
-# ) AS s;
-# """
-
QUERY_STATS = {
QUERIES['DATABASE']: METRICS['DATABASE'],
@@ -244,10 +229,10 @@ class Service(SimpleService):
self.database_poll = configuration.pop('database_poll', None)
self.configuration = configuration
self.connection = False
- self.is_superuser = False
self.data = dict()
self.locks_zeroed = dict()
self.databases = list()
+ self.queries = QUERY_STATS.copy()
def _connect(self):
params = dict(user='postgres',
@@ -294,12 +279,12 @@ class Service(SimpleService):
def add_additional_queries_(self, is_superuser):
if self.index_stats:
- QUERY_STATS[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
+ self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
if self.table_stats:
- QUERY_STATS[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
+ self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
if is_superuser:
- QUERY_STATS[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- QUERY_STATS[QUERIES['ARCHIVE']] = METRICS['ARCHIVE']
+ self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+ self.queries[QUERIES['ARCHIVE']] = METRICS['ARCHIVE']
def create_dynamic_charts_(self):
@@ -318,7 +303,7 @@ class Service(SimpleService):
cursor = self.connection.cursor(cursor_factory=DictCursor)
try:
self.data.update(self.locks_zeroed)
- for query, metrics in QUERY_STATS.items():
+ for query, metrics in self.queries.items():
self.query_stats_(cursor, query, metrics)
except OperationalError:
@@ -398,13 +383,3 @@ def add_database_stat_chart_(order, definitions, name, database_name):
definitions[chart_name] = {
'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
'lines': create_lines(database_name, chart_template['lines'])}
-
-
-#
-# def add_replication_stats(self, cursor):
-# cursor.execute(REPLICATION)
-# temp = cursor.fetchall()
-# for row in temp:
-# self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
-# 'byte_lag',
-# int(row.get('byte_lag', 0)))
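
The key change in this file: optional queries are now added to a per-instance copy (self.queries) instead of mutating the module-level QUERY_STATS, so one job's INDEX_STATS/TABLE_STATS/superuser queries can no longer leak into every other job. A minimal illustration of the pattern, with placeholder queries:

QUERY_STATS = {'SELECT 1;': ['metric_a']}

class Job(object):
    def __init__(self, table_stats=False):
        self.queries = QUERY_STATS.copy()      # per-instance copy
        if table_stats:
            self.queries['SELECT 2;'] = ['metric_b']

job1 = Job(table_stats=True)
job2 = Job()
print(len(job1.queries), len(job2.queries))    # 2 1 -- job2 is unaffected
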
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
index a643cc6a0..1d5417ec2 100644
--- a/python.d/python_modules/base.py
+++ b/python.d/python_modules/base.py
@@ -17,38 +17,36 @@
#
# using ".encode()" in one thread can block other threads as well (only in python2)
-import time
import os
+import re
import socket
+import time
import threading
-import ssl
+
+import urllib3
+
+from glob import glob
from subprocess import Popen, PIPE
from sys import exc_info
-from glob import glob
-import re
-try:
- from urlparse import urlparse
-except ImportError:
- from urllib.parse import urlparse
-try:
- import urllib.request as urllib2
-except ImportError:
- import urllib2
+
try:
import MySQLdb
- PYMYSQL = True
+ PY_MYSQL = True
except ImportError:
try:
import pymysql as MySQLdb
- PYMYSQL = True
+ PY_MYSQL = True
except ImportError:
- PYMYSQL = False
+ PY_MYSQL = False
+
import msg
+
+PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
try:
- PATH = os.getenv('PATH').split(':')
+ urllib3.disable_warnings()
except AttributeError:
- PATH = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'.split(':')
+ msg.error('urllib3: warnings were not disabled')
# class BaseService(threading.Thread):
@@ -504,75 +502,68 @@ class UrlService(SimpleService):
self.url = self.configuration.get('url')
self.user = self.configuration.get('user')
self.password = self.configuration.get('pass')
- self.ss_cert = self.configuration.get('ss_cert')
- self.proxy = self.configuration.get('proxy')
-
- def __add_openers(self, user=None, password=None, ss_cert=None, proxy=None, url=None):
- user = user or self.user
- password = password or self.password
- ss_cert = ss_cert or self.ss_cert
- proxy = proxy or self.proxy
-
- handlers = list()
-
- # HTTP Basic Auth handler
- if all([user, password, isinstance(user, str), isinstance(password, str)]):
- url = url or self.url
- url_parse = urlparse(url)
- top_level_url = '://'.join([url_parse.scheme, url_parse.netloc])
- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- passman.add_password(None, top_level_url, user, password)
- handlers.append(urllib2.HTTPBasicAuthHandler(passman))
- self.debug("Enabling HTTP basic auth")
-
- # HTTPS handler
- # Self-signed certificate ignore
- if ss_cert:
- try:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
- except AttributeError:
- self.error('HTTPS self-signed certificate ignore not enabled')
- else:
- handlers.append(urllib2.HTTPSHandler(context=ctx))
- self.debug("Enabling HTTP self-signed certificate ignore")
-
- # PROXY handler
- if proxy and isinstance(proxy, str) and not ss_cert:
- handlers.append(urllib2.ProxyHandler(dict(http=proxy)))
- self.debug("Enabling HTTP proxy handler (%s)" % proxy)
-
- opener = urllib2.build_opener(*handlers)
- return opener
-
- def _build_opener(self, **kwargs):
+ self.proxy_user = self.configuration.get('proxy_user')
+ self.proxy_password = self.configuration.get('proxy_pass')
+ self.proxy_url = self.configuration.get('proxy_url')
+ self._manager = None
+
+ def __make_headers(self, **header_kw):
+ user = header_kw.get('user') or self.user
+ password = header_kw.get('pass') or self.password
+ proxy_user = header_kw.get('proxy_user') or self.proxy_user
+ proxy_password = header_kw.get('proxy_pass') or self.proxy_password
+ header_params = dict(keep_alive=True)
+ proxy_header_params = dict()
+ if user and password:
+ header_params['basic_auth'] = '{user}:{password}'.format(user=user,
+ password=password)
+ if proxy_user and proxy_password:
+ proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
+ password=proxy_password)
try:
- return self.__add_openers(**kwargs)
+ return urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
except TypeError as error:
- self.error('build_opener() error:', str(error))
+ self.error('build_header() error: {error}'.format(error=error))
+ return None, None
+
+ def _build_manager(self, **header_kw):
+ header, proxy_header = self.__make_headers(**header_kw)
+ if header is None or proxy_header is None:
+ return None
+ proxy_url = header_kw.get('proxy_url') or self.proxy_url
+ if proxy_url:
+ manager = urllib3.ProxyManager
+ params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
+ else:
+ manager = urllib3.PoolManager
+ params = dict(headers=header)
+ try:
+ return manager(**params)
+ except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
+ self.error('build_manager() error:', str(error))
return None
- def _get_raw_data(self, url=None, opener=None):
+ def _get_raw_data(self, url=None, manager=None):
"""
Get raw data from http request
:return: str
"""
- data = None
try:
- opener = opener or self.opener
- data = opener.open(url or self.url, timeout=self.update_every * 2)
- raw_data = data.read().decode('utf-8', 'ignore')
- except urllib2.URLError as error:
- self.error('Url: %s. Error: %s' % (url or self.url, str(error)))
- return None
- except Exception as error:
- self.error(str(error))
+ url = url or self.url
+ manager = manager or self._manager
+ # TODO: timeout, retries and method hardcoded..
+ response = manager.request(method='GET',
+ url=url,
+ timeout=1,
+ retries=1,
+ headers=manager.headers)
+ except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
return None
- finally:
- if data is not None:
- data.close()
- return raw_data or None
+ if response.status == 200:
+ return response.data.decode()
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url, code=response.status))
+ return None
def check(self):
"""
@@ -583,20 +574,21 @@ class UrlService(SimpleService):
self.error('URL is not defined or type is not <str>')
return False
- self.opener = self.__add_openers()
+ self._manager = self._build_manager()
+ if not self._manager:
+ return False
try:
data = self._get_data()
except Exception as error:
- self.error('_get_data() failed. Url: %s. Error: %s' % (self.url, error))
+ self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
return False
if isinstance(data, dict) and data:
self._data_from_check = data
return True
- else:
- self.error("_get_data() returned no data or type is not <dict>")
- return False
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
class SocketService(SimpleService):
@@ -1007,17 +999,20 @@ class MySQLService(SimpleService):
def check(self):
def get_connection_properties(conf, extra_conf):
properties = dict()
- if 'user' in conf and conf['user']:
+ if conf.get('user'):
properties['user'] = conf['user']
- if 'pass' in conf and conf['pass']:
+ if conf.get('pass'):
properties['passwd'] = conf['pass']
- if 'socket' in conf and conf['socket']:
+ if conf.get('socket'):
properties['unix_socket'] = conf['socket']
- elif 'host' in conf and conf['host']:
+ elif conf.get('host'):
properties['host'] = conf['host']
properties['port'] = int(conf.get('port', 3306))
- elif 'my.cnf' in conf and conf['my.cnf']:
- properties['read_default_file'] = conf['my.cnf']
+ elif conf.get('my.cnf'):
+ if MySQLdb.__name__ == 'pymysql':
+ self.error('"my.cnf" parsing is not working for pymysql')
+ else:
+ properties['read_default_file'] = conf['my.cnf']
if isinstance(extra_conf, dict) and extra_conf:
properties.update(extra_conf)
@@ -1046,7 +1041,7 @@ class MySQLService(SimpleService):
log_error('Unsupported "queries" format. Must be not empty <dict>')
return None
- if not PYMYSQL:
+ if not PY_MYSQL:
self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
return False
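
Summarising the new UrlService plumbing: headers come from urllib3.make_headers (keep-alive plus optional basic auth), the manager is a PoolManager or ProxyManager, and _get_raw_data issues a plain GET through it. A hedged standalone sketch of the same calls, with a hypothetical URL and credentials:

import urllib3

urllib3.disable_warnings()
headers = urllib3.make_headers(keep_alive=True, basic_auth='netdata:secret')
manager = urllib3.PoolManager(headers=headers)
response = manager.request(method='GET',
                           url='http://127.0.0.1:19999/api/v1/charts',
                           timeout=1,
                           retries=1,
                           headers=manager.headers)
if response.status == 200:
    print(response.data.decode())
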
diff --git a/python.d/python_modules/urllib3/__init__.py b/python.d/python_modules/urllib3/__init__.py
new file mode 100644
index 000000000..26493ecb9
--- /dev/null
+++ b/python.d/python_modules/urllib3/__init__.py
@@ -0,0 +1,97 @@
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+ HTTPConnectionPool,
+ HTTPSConnectionPool,
+ connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.21.1'
+
+__all__ = (
+ 'HTTPConnectionPool',
+ 'HTTPSConnectionPool',
+ 'PoolManager',
+ 'ProxyManager',
+ 'HTTPResponse',
+ 'Retry',
+ 'Timeout',
+ 'add_stderr_logger',
+ 'connection_from_url',
+ 'disable_warnings',
+ 'encode_multipart_formdata',
+ 'get_host',
+ 'make_headers',
+ 'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug('Added a stderr logging handler to logger: %s', __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarning's always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter('ignore', category)
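
Typical use of the two helpers exported above, assuming the vendored package is importable as urllib3 (the target URL is a placeholder and the request needs network access):

import urllib3

urllib3.add_stderr_logger()   # verbose request logging to stderr while debugging
urllib3.disable_warnings()    # silence HTTPWarning subclasses, e.g. InsecureRequestWarning
http = urllib3.PoolManager()
response = http.request('GET', 'http://example.org/')
print(response.status)
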
diff --git a/python.d/python_modules/urllib3/_collections.py b/python.d/python_modules/urllib3/_collections.py
new file mode 100644
index 000000000..4849ddecd
--- /dev/null
+++ b/python.d/python_modules/urllib3/_collections.py
@@ -0,0 +1,314 @@
+from __future__ import absolute_import
+from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+try: # Python 2.7+
+ from collections import OrderedDict
+except ImportError:
+ from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+ ``dispose_func(value)`` is called on the evicted value.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s api. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
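
A quick demonstration of both containers defined in this file, following the doctests in their docstrings:

from urllib3._collections import RecentlyUsedContainer, HTTPHeaderDict

lru = RecentlyUsedContainer(maxsize=2)
lru['a'] = 1
lru['b'] = 2
lru['c'] = 3                  # evicts 'a', the least recently used key
print(lru.keys())             # ['b', 'c']

headers = HTTPHeaderDict()
headers.add('Set-Cookie', 'foo=bar')
headers.add('set-cookie', 'baz=quxx')
print(headers['SET-cookie'])  # foo=bar, baz=quxx
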
diff --git a/python.d/python_modules/urllib3/connection.py b/python.d/python_modules/urllib3/connection.py
new file mode 100644
index 000000000..c0d832998
--- /dev/null
+++ b/python.d/python_modules/urllib3/connection.py
@@ -0,0 +1,373 @@
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import sys
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+
+try: # Compiled with SSL?
+ import ssl
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError: # Python 2:
+ class ConnectionError(Exception):
+ pass
+
+
+from .exceptions import (
+ NewConnectionError,
+ ConnectTimeoutError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ assert_fingerprint,
+ create_urllib3_context,
+ ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+ 'http': 80,
+ 'https': 443,
+}
+
+# When updating RECENT_DATE, move it to
+# within two years of the current date, and no
+# earlier than 6 months ago.
+RECENT_DATE = datetime.date(2016, 1, 1)
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+ pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on httplib.HTTPConnection but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+
+ .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass::
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme['http']
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if six.PY3: # Python 3
+ kw.pop('strict', None)
+
+ # Pre-set source_address in case we have an older Python like 2.6.
+ self.source_address = kw.get('source_address')
+
+ if sys.version_info < (2, 7): # Python 2.6
+ # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+ # not newer versions. We can still use it when creating a
+ # connection though, so we pop it *after* we have saved it as
+ # self.source_address.
+ kw.pop('source_address', None)
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+ # Superclass also sets self.source_address in Python 2.7+.
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ def _new_conn(self):
+ """ Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self.host, self.port), self.timeout, **extra_kw)
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ # the _tunnel_host attribute was added in python 2.6.3 (via
+ # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+ # not have them.
+ if getattr(self, '_tunnel_host', None):
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = HTTPHeaderDict(headers if headers is not None else {})
+ skip_accept_encoding = 'accept-encoding' in headers
+ skip_host = 'host' in headers
+ self.putrequest(
+ method,
+ url,
+ skip_accept_encoding=skip_accept_encoding,
+ skip_host=skip_host
+ )
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if 'transfer-encoding' not in headers:
+ self.putheader('Transfer-Encoding', 'chunked')
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (six.binary_type,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, six.binary_type):
+ chunk = chunk.encode('utf8')
+ len_str = hex(len(chunk))[2:]
+ self.send(len_str.encode('utf-8'))
+ self.send(b'\r\n')
+ self.send(chunk)
+ self.send(b'\r\n')
+
+ # After the if clause, to always have a closed body
+ self.send(b'0\r\n\r\n')
+
+
+class HTTPSConnection(HTTPConnection):
+ default_port = port_by_scheme['https']
+
+ ssl_version = None
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None, **kw):
+
+ HTTPConnection.__init__(self, host, port, strict=strict,
+ timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.ssl_context = ssl_context
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = 'https'
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(None),
+ cert_reqs=resolve_cert_reqs(None),
+ )
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ssl_context=self.ssl_context,
+ )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+ """
+ Based on httplib.HTTPSConnection but wraps the socket with
+ SSL certification.
+ """
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ssl_version = None
+ assert_fingerprint = None
+
+ def set_cert(self, key_file=None, cert_file=None,
+ cert_reqs=None, ca_certs=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided, we can try to guess. If the user gave
+ # us a cert database, we assume they want to use it: otherwise, if
+ # they gave us an SSL Context object we should use whatever is set for
+ # it.
+ if cert_reqs is None:
+ if ca_certs or ca_cert_dir:
+ cert_reqs = 'CERT_REQUIRED'
+ elif self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+
+ hostname = self.host
+ if getattr(self, '_tunnel_host', None):
+ # _tunnel_host was added in Python 2.6.3
+ # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+ self.sock = conn
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn((
+ 'System time is way off (before {0}). This will probably '
+ 'lead to SSL verification errors').format(RECENT_DATE),
+ SystemTimeWarning
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ server_hostname=hostname,
+ ssl_context=context)
+
+ if self.assert_fingerprint:
+ assert_fingerprint(self.sock.getpeercert(binary_form=True),
+ self.assert_fingerprint)
+ elif context.verify_mode != ssl.CERT_NONE \
+ and not getattr(context, 'check_hostname', False) \
+ and self.assert_hostname is not False:
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get('subjectAltName', ()):
+ warnings.warn((
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
+ )
+ _match_hostname(cert, self.assert_hostname or hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED or
+ self.assert_fingerprint is not None
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.error(
+ 'Certificate did not match expected hostname: %s. '
+ 'Certificate: %s', asserted_hostname, cert
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+if ssl:
+ # Make a copy for testing.
+ UnverifiedHTTPSConnection = HTTPSConnection
+ HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
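
Picking up the socket_options example from the HTTPConnection docstring above, here is a hedged end-to-end sketch that enables TCP keepalive on top of the defaults (host and port are placeholders; the request needs network access):

import socket

from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool

options = HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
pool = HTTPConnectionPool('example.org', port=80, socket_options=options)
response = pool.request('GET', '/')
print(response.status)
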
diff --git a/python.d/python_modules/urllib3/connectionpool.py b/python.d/python_modules/urllib3/connectionpool.py
new file mode 100644
index 000000000..b4f1166a6
--- /dev/null
+++ b/python.d/python_modules/urllib3/connectionpool.py
@@ -0,0 +1,899 @@
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+ ClosedPoolError,
+ ProtocolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ LocationValueError,
+ MaxRetryError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+ InsecureRequestWarning,
+ NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+ port_by_scheme,
+ DummyConnection,
+ HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+ HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url
+
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows
+ import Queue as _unused_module_Queue # noqa: F401
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+ """
+
+ scheme = None
+ QueueCls = queue.LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _ipv6_host(host).lower()
+ self.port = port
+
+ def __str__(self):
+ return '%s(host=%r, port=%r)' % (type(self).__name__,
+ self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`httplib.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`httplib.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`httplib.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+        fine-grained control over request timeouts. After the constructor has
+        run, this is always a :class:`urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+        until a connection has been released. This is useful in multithreaded
+        situations where one wants to cap usage at ``maxsize`` connections
+        per host, to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+        :class:`urllib3.connectionpool.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+        instead, see :class:`urllib3.connectionpool.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = 'http'
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(self, host, port=None, strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+ headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ **conn_kw):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault('socket_options', [])
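+
+    # Illustrative sketch (not upstream code; host and sizes are arbitrary):
+    # with maxsize=2 and block=True, at most two connections to this host are
+    # ever open, and a third concurrent request waits until one is returned:
+    #
+    #     pool = HTTPConnectionPool('localhost', maxsize=2, block=True)
+    #     r = pool.request('GET', '/')  # borrows, then re-pools a connection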
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTP connection (%d): %s",
+ self.num_connections, self.host)
+
+ conn = self.ConnectionCls(host=self.host, port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(self,
+ "Pool reached maximum size and no more "
+ "connections are allowed.")
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, 'auto_open', 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # httplib._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s",
+ self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
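+        # For example (illustrative): _get_timeout(2.5) yields a Timeout with
+        # both connect and read set to 2.5 seconds, while
+        # _get_timeout(_Default) clones this pool's configured default.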
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+        # See the note in _make_request about EAGAIN in Python 3. In Python 2
+        # we have to specifically catch it and throw the timeout error
+ if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+ **httplib_request_kw):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls httplib.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, 'sock', None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try: # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError: # Python 2.6 and older, Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except Exception as e:
+ # Remove the TypeError from the exception chain in Python 3;
+ # otherwise it looks like a programming error was the cause.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+ log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+ method, url, http_version, httplib_response.status,
+ httplib_response.length)
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith('/'):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+
+ host = _ipv6_host(host).lower()
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
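+        # For example (illustrative): a pool built for http://example.com:80
+        # treats 'http://example.com/index' as the same host, since 80 is the
+        # scheme's default port, but not 'https://example.com/'.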
+
+ def urlopen(self, method, url, body=None, headers=None, retries=None,
+ redirect=True, assert_same_host=True, timeout=_Default,
+ pool_timeout=None, release_conn=None, chunked=False,
+ body_pos=None, **response_kw):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param body:
+            Data to send in the request body (useful for creating
+            POST requests; for convenience wrappers, see
+            :class:`.RequestMethods`).
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+            If ``True``, ensures that the host of each request matches the
+            pool's host, raising HostChangedError otherwise. When ``False``,
+            you can use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get('preload_content', True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/shazow/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ # Merge the proxy headers. Only do this in HTTP. We have to copy the
+ # headers dict so we can safely change it without those changes being
+ # reflected in anyone else's copy.
+ if self.scheme == 'http':
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(conn, method, url,
+ timeout=timeout_obj,
+ body=body, headers=headers,
+ chunked=chunked)
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw['request_method'] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw)
+
+ # Everything went great!
+ clean_exit = True
+
+ except queue.Empty:
+ # Timed out by queue.
+ raise EmptyPoolError(self, "No pool connections are available.")
+
+ except (BaseSSLError, CertificateError) as e:
+ # Close the connection. If a connection is reused on which there
+ # was a Certificate error, the next request will certainly raise
+ # another Certificate error.
+ clean_exit = False
+ raise SSLError(e)
+
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ clean_exit = False
+ raise
+
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+            # Discard the connection for these exceptions. It will be
+            # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError('Cannot connect to proxy.', e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError('Connection aborted.', e)
+
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning("Retrying (%r) after connection "
+ "broken by '%r': %s", retries, err, url)
+ return self.urlopen(method, url, body, headers, retries,
+ redirect, assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method, redirect_location, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader('Retry-After'))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method, url, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ body_pos=body_pos, **response_kw)
+
+ return response
+
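+# A hedged usage sketch (not upstream code; the host and Retry values are
+# arbitrary): passing a Retry object to urlopen() gives finer control than a
+# bare integer, e.g. separate budgets for connect errors and redirects:
+#
+#     pool = HTTPConnectionPool('localhost')
+#     r = pool.urlopen('GET', '/', retries=Retry(connect=3, redirect=2))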
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ When Python is compiled with the :mod:`ssl` module, then
+ :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+ instead of :class:`.HTTPSConnection`.
+
+ :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = 'https'
+ ConnectionCls = HTTPSConnection
+
+ def __init__(self, host, port=None,
+ strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+ block=False, headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ key_file=None, cert_file=None, cert_reqs=None,
+ ca_certs=None, ssl_version=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None, **conn_kw):
+
+ HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+ block, headers, retries, _proxy, _proxy_headers,
+ **conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(key_file=self.key_file,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint)
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+        Establish the tunnel connection early, because otherwise httplib
+        would improperly set the Host: header to the proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`httplib.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTPS connection (%d): %s",
+ self.num_connections, self.host)
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError("Can't connect to HTTPS URL because the SSL "
+ "module is not available.")
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(host=actual_host, port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn((
+ 'Unverified HTTPS request is being made. '
+ 'Adding certificate verification is strongly advised. See: '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings'),
+ InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+ """
+    Given a url, return a :class:`.ConnectionPool` instance of its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+    of the url before creating a :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == 'https':
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+ """
+ Process IPv6 address literals
+ """
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ #
+ # Also if an IPv6 address literal has a zone identifier, the
+    # percent sign might be URI-encoded; convert it back to a literal '%'.
+ if host.startswith('[') and host.endswith(']'):
+ host = host.replace('%25', '%').strip('[]')
+ return host
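+
+# For example (illustrative): _ipv6_host('[fe80::1%25eth0]') returns
+# 'fe80::1%eth0', the bracket-free, percent-decoded form httplib expects.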
diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/python.d/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 000000000..e26b84086
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,590 @@
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+ c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+ c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+ raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+ raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+ raise OSError(
+ 'Only OS X 10.8 and newer are supported, not %s.%s' % (
+ version_info[0], version_info[1]
+ )
+ )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [
+ CFAllocatorRef,
+ CFDataRef
+ ]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [
+ SecCertificateRef
+ ]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef)
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef)
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [
+ SecKeychainRef
+ ]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef)
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+ Security.SSLSetIOFuncs.argtypes = [
+ SSLContextRef,
+ SSLReadFunc,
+ SSLWriteFunc
+ ]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [
+ SSLContextRef,
+ CFArrayRef
+ ]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [
+ SSLContextRef,
+ CFTypeRef,
+ Boolean
+ ]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [
+ SSLContextRef,
+ SSLConnectionRef
+ ]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNumberEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite)
+ ]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol)
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [
+ SSLContextRef,
+ POINTER(SecTrustRef)
+ ]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [
+ SecTrustRef,
+ CFArrayRef
+ ]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+ SecTrustRef,
+ Boolean
+ ]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [
+ SecTrustRef,
+ POINTER(SecTrustResultType)
+ ]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [
+ SecTrustRef
+ ]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [
+ SecTrustRef,
+ CFIndex
+ ]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [
+ SSLContextRef,
+ SSLSessionOption,
+ Boolean
+ ]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, 'kSecImportExportPassphrase'
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, 'kSecImportItemIdentity'
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [
+ CFStringRef,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFIndex
+ ]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [
+ CFDictionaryRef,
+ CFTypeRef
+ ]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [
+ CFMutableArrayRef,
+ c_void_p
+ ]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [
+ CFArrayRef
+ ]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+ CFArrayRef,
+ CFIndex
+ ]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, 'kCFAllocatorDefault'
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 000000000..5e3494bce
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,343 @@
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p,
+ buffer,
+ 1024,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError('Error copying C string from CFStringRef')
+ string = buffer.value
+ if string is not None:
+ string = string.decode('utf-8')
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and throws an exception if there is an error to
+ report
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u'':
+ output = u'OSStatus %s' % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ der_certs = [
+ base64.b64decode(match.group(1))
+ for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+        CoreFoundation.CFRelease(cert_array)
+        raise
+
+ return cert_array
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
+ password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path,
+ len(password),
+ password,
+ False,
+ None,
+ ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, 'rb') as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ raw_filedata,
+ len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array) # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(
+ result_array, index
+ )
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(
+ keychain, file_path
+ )
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain,
+ certificates[0],
+ ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
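+
+
+# A minimal sketch (not upstream code; the PEM path is hypothetical) of the
+# intended calling pattern, pairing this helper with _temporary_keychain();
+# the caller must release the returned array once the connection is done:
+#
+#     keychain, tempdir = _temporary_keychain()
+#     chain = _load_client_cert_chain(keychain, '/tmp/client.pem')
+#     # ...hand `chain` to Security.SSLSetCertificate(context, chain)...
+#     CoreFoundation.CFRelease(chain)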
diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/python.d/python_modules/urllib3/contrib/appengine.py
new file mode 100644
index 000000000..814b0222d
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/appengine.py
@@ -0,0 +1,296 @@
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+    Notably it will raise an :class:`AppEnginePlatformError` if:
+        * URLFetch is not available.
+        * You attempt to use this on App Engine Flexible, as full socket
+          support is available there.
+        * A request size is more than 10 megabytes.
+        * A response size is more than 32 megabytes.
+        * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
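+
+    # A minimal usage sketch (hypothetical URL), assuming a sandboxed App
+    # Engine runtime where URLFetch is importable:
+    #
+    #     manager = AppEngineManager(retries=2)
+    #     resp = manager.request('GET', 'https://www.example.com/')
+    #     assert resp.status == 200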
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True,
+ urlfetch_retries=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_mvms():
+ raise AppEnginePlatformError(
+                "Use normal urllib3.PoolManager instead of AppEngineManager "
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = (
+ redirect and
+ retries.redirect != 0 and
+ retries.total)
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+                "URLFetch response too large, URLFetch only supports "
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+            if self.urlfetch_retries and retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method, redirect_url, body, headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader('Retry-After'))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == 'chunked':
+ encodings = transfer_encoding.split(",")
+ encodings.remove('chunked')
+ urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
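+
+    # Illustrative (hypothetical values): Timeout(total=10) maps to
+    # deadline=10, while Timeout(connect=2, read=7) carries granular settings
+    # URLFetch cannot honour, so the warning above fires and timeout.total
+    # (None in that case) defers to URLFetch's default deadline.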
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
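+
+    # Illustrative (hypothetical values): Retry(total=3) passes through
+    # silently, while Retry(total=3, connect=2) emits the warning above
+    # because URLFetch only honours a total retry count.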
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine() or
+ is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+ return os.environ.get('GAE_VM', False) == 'true'
diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/python.d/python_modules/urllib3/contrib/ntlmpool.py
new file mode 100644
index 000000000..642e99ed2
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,112 @@
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
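+
+# A minimal usage sketch (hypothetical host and credentials), assuming the
+# `ntlm` package is installed:
+#
+#     pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/protected/',
+#                               host='intranet.example.com', port=443)
+#     response = pool.urlopen('GET', '/protected/')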
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+    Implements an NTLM authentication version of a urllib3 connection pool
+ """
+
+ scheme = 'https'
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+        authurl is any URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split('\\', 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+ self.num_connections, self.host, self.authurl)
+
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ req_header = 'Authorization'
+ resp_header = 'www-authenticate'
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = (
+ 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', reshdr)
+ log.debug('Response data: %s [...]', res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(', ')
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == 'NTLM ':
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception('Unexpected %s response header: %s' %
+ (resp_header, reshdr[resp_header]))
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = \
+ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+ self.user,
+ self.domain,
+ self.pw,
+ NegotiateFlags)
+ headers[req_header] = 'NTLM %s' % auth_msg
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', dict(res.getheaders()))
+ log.debug('Response data: %s [...]', res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception('Server rejected request: wrong '
+ 'username or password')
+ raise Exception('Wrong server response: %s %s' %
+ (res.status, res.reason))
+
+ res.fp = None
+ log.debug('Connection established')
+ return conn
+
+ def urlopen(self, method, url, body=None, headers=None, retries=3,
+ redirect=True, assert_same_host=True):
+ if headers is None:
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ return super(NTLMConnectionPool, self).urlopen(method, url, body,
+ headers, retries,
+ redirect,
+ assert_same_host)
diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/python.d/python_modules/urllib3/contrib/pyopenssl.py
new file mode 100644
index 000000000..6645dbaa9
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,457 @@
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+ pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+
+try:
+ import six
+except ImportError:
+ from ..packages import six
+
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+ _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+ pass
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED:
+ OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+ (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+ _validate_dependencies_met()
+
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ 'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError("'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer.")
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError("'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+ """
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ for prefix in [u'*.', u'.']:
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ return prefix.encode('ascii') + idna.encode(name)
+ return idna.encode(name)
+
+ name = idna_encode(name)
+ if sys.version_info >= (3, 0):
+ name = name.decode('utf-8')
+ return name
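+
+# Illustrative (hypothetical name): u'*.bücher.example' is encoded by
+# idna_encode to b'*.xn--bcher-kva.example' and, on Python 3, decoded back
+# to the str '*.xn--bcher-kva.example' expected by the standard library.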
+
+
+def get_subj_alt_name(peer_cert):
+ """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ # This is technically using private APIs, but should work across all
+ # relevant versions until PyOpenSSL gets something proper for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(
+ x509.SubjectAlternativeName
+ ).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (x509.DuplicateExtension, x509.UnsupportedExtension,
+ x509.UnsupportedGeneralNameType, UnicodeError) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ names = [
+ ('DNS', _dnsname_to_stdlib(name))
+ for name in ext.get_values_for_type(x509.DNSName)
+ ]
+ names.extend(
+ ('IP Address', str(name))
+ for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+    '''API-compatibility wrapper for PyOpenSSL's Connection class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of pypy.
+ '''
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return b''
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv(*args, **kwargs)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ wr = util.wait_for_write(self.socket, self.socket.gettimeout())
+ if not wr:
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1,
+ x509)
+
+ return {
+ 'subject': (
+ (('commonName', x509.get_subject().CN),),
+ ),
+ 'subjectAltName': get_subj_alt_name(x509)
+ }
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(
+ _stdlib_to_openssl_verify[value],
+ _verify_callback
+ )
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode('utf-8')
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode('utf-8')
+ if capath is not None:
+ capath = capath.encode('utf-8')
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_file(certfile)
+ if password is not None:
+ self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode('utf-8')
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(sock, sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError('bad handshake: %r' % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/python.d/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 000000000..72b23ab1c
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,807 @@
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+ Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+ _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+ _load_client_cert_chain
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+try:
+ memoryview(b'')
+except NameError:
+ raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+ ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+ )
+if hasattr(ssl, "PROTOCOL_TLS"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+ buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+ buffer_view = memoryview(buffer)
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ readables = util.wait_for_read([base_socket], timeout)
+ if not readables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+
+ # We need to tell ctypes that we have a buffer that can be
+ # written to. Upsettingly, we do that like this:
+ chunk_size = base_socket.recv_into(
+ buffer_view[read_count:requested_length]
+ )
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+        except socket.error as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ writables = util.wait_for_write([base_socket], timeout)
+ if not writables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+        except socket.error as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two objects references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+ util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
+ custom and doesn't allow changing at this time, mostly because parsing
+ OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, 'rb') as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(
+ trust, ctypes.byref(trust_result)
+ )
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+            # Only release the certificate array if it was actually created.
+            if cert_array is not None:
+                CoreFoundation.CFRelease(cert_array)
+
+ # Ok, now we can look at what the result was.
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed
+ )
+ if trust_result.value not in successes:
+ raise ssl.SSLError(
+ "certificate verify failed, error code: %d" %
+ trust_result.value
+ )
+
+ def handshake(self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode('utf-8')
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context,
+ SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+ True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(
+ self.context, self._client_cert_chain
+ )
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+        if result == SecurityConst.errSSLWouldBlock:
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError(
+ "SecureTransport only supports dumping binary certs"
+ )
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+ # default verify paths if it hasn't been told about any paths, even if
+        # the context itself was configured sometime earlier. We resolve that
+        # by just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError(
+ "SecureTransport doesn't support custom cipher strings"
+ )
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError(
+ "SecureTransport does not support cert directories"
+ )
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+        # Store under the name that __init__ defines and wrap_socket reads.
+        self._client_key_passphrase = password
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname, self._verify, self._trust_bundle,
+ self._min_version, self._max_version, self._client_cert,
+ self._client_key, self._client_key_passphrase
+ )
+ return wrapped_socket
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/python.d/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 000000000..39e92fde1
--- /dev/null
+++ b/python.d/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+ IPv6 addresses. Any such connection attempt will fail. You must use a domain
+ name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+ such connection attempt will fail.
+"""
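+
+# A minimal usage sketch (hypothetical proxy address); the 'socks5h' scheme
+# asks the proxy to resolve DNS remotely, per the scheme handling below:
+#
+#     proxy = SOCKSProxyManager('socks5h://127.0.0.1:9050/')
+#     response = proxy.request('GET', 'https://example.com/')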
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+ from ..exceptions import DependencyWarning
+
+ warnings.warn((
+ 'SOCKS support in urllib3 requires the installation of optional '
+ 'dependencies: specifically, PySocks. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+ ),
+ DependencyWarning
+ )
+ raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+ HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+ HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop('_socks_options')
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options['socks_version'],
+ proxy_addr=self._socks_options['proxy_host'],
+ proxy_port=self._socks_options['proxy_port'],
+ proxy_username=self._socks_options['username'],
+ proxy_password=self._socks_options['password'],
+ proxy_rdns=self._socks_options['rdns'],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout)
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+ pool_classes_by_scheme = {
+ 'http': SOCKSHTTPConnectionPool,
+ 'https': SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(self, proxy_url, username=None, password=None,
+ num_pools=10, headers=None, **connection_pool_kw):
+ parsed = parse_url(proxy_url)
+
+ if parsed.scheme == 'socks5':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == 'socks5h':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == 'socks4':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == 'socks4a':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError(
+ "Unable to determine SOCKS version from %s" % proxy_url
+ )
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ 'socks_version': socks_version,
+ 'proxy_host': parsed.host,
+ 'proxy_port': parsed.port,
+ 'username': username,
+ 'password': password,
+ 'rdns': rdns
+ }
+ connection_pool_kw['_socks_options'] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/python.d/python_modules/urllib3/exceptions.py b/python.d/python_modules/urllib3/exceptions.py
new file mode 100644
index 000000000..6c4be5810
--- /dev/null
+++ b/python.d/python_modules/urllib3/exceptions.py
@@ -0,0 +1,246 @@
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+ IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ "Base exception used by this module."
+ pass
+
+
+class HTTPWarning(Warning):
+ "Base warning used by this module."
+ pass
+
+
+class PoolError(HTTPError):
+ "Base exception for errors caused within a pool."
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ "Base exception for PoolErrors that have associated URLs."
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ "Raised when SSL certificate fails in an HTTPS connection."
+ pass
+
+
+class ProxyError(HTTPError):
+ "Raised when the connection to a proxy fails."
+ pass
+
+
+class DecodeError(HTTPError):
+ "Raised when automatic decoding based on Content-Type fails."
+ pass
+
+
+class ProtocolError(HTTPError):
+ "Raised when something unexpected happens mid-request/response."
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (
+ url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ "Raised when an existing pool gets a request for a foreign host."
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """ Raised when passing an invalid state to a timeout """
+ pass
+
+
+class TimeoutError(HTTPError):
+ """ Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ "Raised when a socket timeout occurs while receiving data from a server"
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ "Raised when a socket timeout occurs while connecting to a server"
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
+
+
+class EmptyPoolError(PoolError):
+ "Raised when a pool runs out of connections and no more are allowed."
+ pass
+
+
+class ClosedPoolError(PoolError):
+ "Raised when a request enters a pool after the pool has been closed."
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ "Raised when there is something wrong with a given URL input."
+ pass
+
+
+class LocationParseError(LocationValueError):
+ "Raised when get_host or similar fails to parse the URL input."
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class ResponseError(HTTPError):
+ "Used as a container for an error reason supplied in a MaxRetryError."
+ GENERIC_ERROR = 'too many error responses'
+ SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+ "Warned when perfoming security reducing actions"
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ "Warned when making an unverified HTTPS request."
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ "Warned when system time is suspected to be wrong"
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+ "Warned when making a HTTPS request without SNI available."
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be httplib.HTTPResponse like (have an fp attribute which
+ returns raw chunks) for read_chunked().
+ """
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of http_client.IncompleteRead to allow int value
+ for `partial` to avoid creating large objects on streamed
+ reads.
+ """
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return ('IncompleteRead(%i bytes read, '
+ '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+ "The header provided was somehow invalid."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ "urllib3 encountered an error when trying to rewind a body"
+ pass
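
A catch-order sketch for the hierarchy above, assuming the vendored tree is
importable as `urllib3`: with retries disabled, timeout errors propagate
directly instead of being wrapped in MaxRetryError, so a single handler on
TimeoutError covers both the connect and read variants.

    import urllib3
    from urllib3.exceptions import TimeoutError

    http = urllib3.PoolManager()
    try:
        http.request('GET', 'http://example.com/', timeout=0.001, retries=False)
    except TimeoutError:
        # Catches both ConnectTimeoutError and ReadTimeoutError.
        print('timed out while connecting or reading')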
diff --git a/python.d/python_modules/urllib3/fields.py b/python.d/python_modules/urllib3/fields.py
new file mode 100644
index 000000000..19b0ae0c8
--- /dev/null
+++ b/python.d/python_modules/urllib3/fields.py
@@ -0,0 +1,178 @@
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param(name, value):
+ """
+ Helper function to format and quote a single header parameter.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows RFC 2231, as
+ suggested by RFC 2388 Section 4.4.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = '%s="%s"' % (name, value)
+ try:
+ result.encode('ascii')
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
+ value = value.encode('utf-8')
+ value = email.utils.encode_rfc2231(value, 'utf-8')
+ value = '%s*=%s' % (name, value)
+ return value
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ """
+ def __init__(self, name, data, filename=None, headers=None):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+
+ @classmethod
+ def from_tuples(cls, fieldname, value):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+        Supports constructing :class:`~urllib3.fields.RequestField` from
+        key/value string pairs AND key/filetuple pairs. A filetuple is a
+        (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(fieldname, data, filename=filename)
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ return format_header_param(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return '; '.join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append('%s: %s' % (header_name, header_value))
+
+ lines.append('\r\n')
+ return '\r\n'.join(lines)
+
+ def make_multipart(self, content_disposition=None, content_type=None,
+ content_location=None):
+ """
+ Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers on the request parameter.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers['Content-Disposition'] = content_disposition or 'form-data'
+ self.headers['Content-Disposition'] += '; '.join([
+ '', self._render_parts(
+ (('name', self._name), ('filename', self._filename))
+ )
+ ])
+ self.headers['Content-Type'] = content_type
+ self.headers['Content-Location'] = content_location
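
A usage sketch for the class above (import path assumed): from_tuples accepts
the 2- and 3-tuple forms shown in its docstring, and render_headers emits the
part headers in the canonical order.

    from urllib3.fields import RequestField

    field = RequestField.from_tuples(
        'attachment', ('report.txt', 'file contents', 'text/plain'))
    print(field.render_headers())
    # Content-Disposition: form-data; name="attachment"; filename="report.txt"
    # Content-Type: text/plain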
diff --git a/python.d/python_modules/urllib3/filepost.py b/python.d/python_modules/urllib3/filepost.py
new file mode 100644
index 000000000..cd11cee46
--- /dev/null
+++ b/python.d/python_modules/urllib3/filepost.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import
+import codecs
+
+from uuid import uuid4
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]  # the utf-8 StreamWriter class
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ return uuid4().hex
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+        :func:`choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b('--%s\r\n' % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b'\r\n')
+
+ body.write(b('--%s--\r\n' % (boundary)))
+
+ content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+ return body.getvalue(), content_type
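
A usage sketch (import path assumed): plain string fields and filetuples can
be mixed, and the returned content type carries the generated boundary.

    from urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata({
        'user': 'alice',
        'avatar': ('avatar.png', b'\x89PNG...', 'image/png'),
    })
    # body is a ready-to-send bytestring; content_type looks like
    # 'multipart/form-data; boundary=<32 hex chars>'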
diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/python.d/python_modules/urllib3/packages/__init__.py
new file mode 100644
index 000000000..170e974c1
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/python.d/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/backports/__init__.py
diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/python.d/python_modules/urllib3/packages/backports/makefile.py
new file mode 100644
index 000000000..75b80dcf8
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/backports/makefile.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+ errors=None, newline=None):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= set(["r", "w", "b"]):
+ raise ValueError(
+ "invalid mode %r (only r, w, b allowed)" % (mode,)
+ )
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
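
The backport is intended to be bound as a makefile method on a socket-like
class (urllib3's pyopenssl contrib module binds it this way on Python 3); a
sketch with a hypothetical FakeSocket subclass:

    import socket

    from urllib3.packages.backports.makefile import backport_makefile

    class FakeSocket(socket.socket):
        """Hypothetical subclass using the backported makefile()."""
        _makefile_refs = 0          # the backport increments this per call
        makefile = backport_makefile

    # sock = FakeSocket(); sock.connect(('example.com', 80))
    # fh = sock.makefile('rb')     # io.BufferedReader over the raw socket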
diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/python.d/python_modules/urllib3/packages/ordered_dict.py
new file mode 100644
index 000000000..4479363cc
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/ordered_dict.py
@@ -0,0 +1,259 @@
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+# Copyright 2009 Raymond Hettinger, released under the MIT License.
+# http://code.activestate.com/recipes/576693/
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
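
A behavior sketch for the backport (Python 2 semantics, where keys() returns
a list):

    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    d['c'] = 3
    print(d.keys())               # ['b', 'a', 'c'] -- insertion order, not sorted
    print(d.popitem())            # ('c', 3)  LIFO by default
    print(d.popitem(last=False))  # ('b', 1)  FIFO when last=False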
diff --git a/python.d/python_modules/urllib3/packages/six.py b/python.d/python_modules/urllib3/packages/six.py
new file mode 100644
index 000000000..190c0239c
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+        Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+        Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
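
A sketch of both metaclass helpers above: neither the Python 2
__metaclass__ attribute nor the Python 3 class-keyword syntax parses on the
other version, which is why these exist.

    class Meta(type):
        def __new__(mcls, name, bases, d):
            d['tagged'] = True
            return super(Meta, mcls).__new__(mcls, name, bases, d)

    @add_metaclass(Meta)
    class Decorated(object):
        pass

    class Inherited(with_metaclass(Meta, object)):
        pass

    assert Decorated.tagged and Inherited.tagged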
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
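
A usage sketch of the registered moves; in this vendored tree the import
prefix would be urllib3.packages.six rather than a top-level six.

    from urllib3.packages.six.moves import range, configparser
    from urllib3.packages.six.moves.urllib.parse import urlparse

    print(list(range(3)))   # xrange on Python 2, range on Python 3
    print(urlparse('http://example.com/x').netloc)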
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 000000000..d6594eb26
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,19 @@
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import CertificateError, match_hostname
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 000000000..1fd42f38a
--- /dev/null
+++ b/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,157 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib. http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IP address subjectAltNames (support for this was
+# added in python-3.5); otherwise only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used all the way back to
+# python-2.4.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
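
A behavior sketch of the wildcard rules implemented above:

    assert _dnsname_match('*.example.com', 'www.example.com')
    assert not _dnsname_match('*.example.com', 'a.b.example.com')  # one label only
    assert not _dnsname_match('*.example.com', 'example.com')      # label required
    assert _dnsname_match('www*.example.com', 'www1.example.com')  # partial wildcard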
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding='ascii', errors='strict')
+ return obj
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+        # byte strings. Addresses should be all ASCII, so we consider it not
+        # an IP address in this case.
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
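For orientation, a minimal usage sketch of the matching rules above; the certificate dict and hostnames are illustrative, and the import path assumes this vendored layout:

# Hypothetical sketch, not part of the patch.
from urllib3.packages.ssl_match_hostname._implementation import (
    CertificateError, match_hostname,
)

# A decoded certificate, shaped like SSLSocket.getpeercert() output.
cert = {'subjectAltName': (('DNS', '*.example.com'),)}

match_hostname(cert, 'www.example.com')  # wildcard matches; returns None

try:
    match_hostname(cert, 'example.org')  # no SAN entry matches
except CertificateError as err:
    print(err)  # hostname 'example.org' doesn't match '*.example.com'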
diff --git a/python.d/python_modules/urllib3/poolmanager.py b/python.d/python_modules/urllib3/poolmanager.py
new file mode 100644
index 000000000..4ae91744d
--- /dev/null
+++ b/python.d/python_modules/urllib3/poolmanager.py
@@ -0,0 +1,440 @@
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+ 'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ 'key_scheme', # str
+ 'key_host', # str
+ 'key_port', # int
+ 'key_timeout', # int or float or Timeout
+ 'key_retries', # int or Retry
+ 'key_strict', # bool
+ 'key_block', # bool
+ 'key_source_address', # str
+ 'key_key_file', # str
+ 'key_cert_file', # str
+ 'key_cert_reqs', # str
+ 'key_ca_certs', # str
+ 'key_ssl_version', # str
+ 'key_ca_cert_dir', # str
+ 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ 'key_maxsize', # int
+ 'key_headers', # dict
+ 'key__proxy', # parsed proxy url
+ 'key__proxy_headers', # dict
+ 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
+ 'key__socks_options', # dict
+ 'key_assert_hostname', # bool or string
+ 'key_assert_fingerprint', # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+        A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context['scheme'] = context['scheme'].lower()
+ context['host'] = context['host'].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ('headers', '_proxy_headers', '_socks_options'):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get('socket_options')
+ if socket_opts is not None:
+ context['socket_options'] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context['key_' + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ 'http': functools.partial(_default_key_normalizer, PoolKey),
+ 'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+ 'http': HTTPConnectionPool,
+ 'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools,
+ dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is provided as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ('scheme', 'host', 'port'):
+ request_context.pop(key, None)
+
+ if scheme == 'http':
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ """
+ Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context['scheme'] = scheme or 'http'
+ if not port:
+ port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+ request_context['port'] = port
+ request_context['host'] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context['scheme'].lower()
+ pool_key_constructor = self.key_fn_by_scheme[scheme]
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context['scheme']
+ host = request_context['host']
+ port = request_context['port']
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+ pool_kwargs=pool_kwargs)
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
+ with custom cross-host redirect logic and only sends the request-uri
+ portion of the ``url``.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw['assert_same_host'] = False
+ kw['redirect'] = False
+ if 'headers' not in kw:
+ kw['headers'] = self.headers
+
+ if self.proxy is not None and u.scheme == "http":
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = 'GET'
+
+ retries = kw.get('retries')
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
+ kw['redirect'] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. For
+        plain HTTP they are sent with each request, while in the
+        HTTPS/CONNECT case they are sent only once. Could be used for proxy
+        authentication.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(self, proxy_url, num_pools=10, headers=None,
+ proxy_headers=None, **connection_pool_kw):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+ proxy_url.port)
+ proxy = parse_url(proxy_url)
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+
+ connection_pool_kw['_proxy'] = self.proxy
+ connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+ super(ProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs)
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {'Accept': '*/*'}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_['Host'] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+
+ if u.scheme == "http":
+ # For proxied HTTPS requests, httplib sets the necessary headers
+ # on the CONNECT to the proxy. For HTTP, we'll definitely
+ # need to set 'Host' at the very least.
+ headers = kw.get('headers', self.headers)
+ kw['headers'] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
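The pool-key normalization above means equivalent URLs share a single pool. A short sketch, assuming the vendored package imports as `urllib3`; the URLs are illustrative and no request is actually sent:

from urllib3.poolmanager import PoolManager

manager = PoolManager(num_pools=2, timeout=5.0)

# Scheme and host are lower-cased and the default port is filled in,
# so both URLs map to the same PoolKey and therefore the same pool.
p1 = manager.connection_from_url('http://EXAMPLE.com/a')
p2 = manager.connection_from_url('http://example.com:80/b')
assert p1 is p2

# A different host gets its own pool (LRU-evicted beyond num_pools).
p3 = manager.connection_from_url('https://example.org/')
assert p3 is not p1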
diff --git a/python.d/python_modules/urllib3/request.py b/python.d/python_modules/urllib3/request.py
new file mode 100644
index 000000000..c0fddff04
--- /dev/null
+++ b/python.d/python_modules/urllib3/request.py
@@ -0,0 +1,148 @@
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+ """
+    Convenience mixin for classes that implement a :meth:`urlopen` method, such
+ as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+ :class:`~urllib3.poolmanager.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+ :meth:`.request` is for making any kind of request, it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(self, method, url, body=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **kw): # Abstract
+        raise NotImplementedError("Classes extending RequestMethods must implement "
+                                  "their own ``urlopen`` method.")
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+ else:
+ return self.request_encode_body(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += '?' + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(self, method, url, fields=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :meth:`urllib.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one.")
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
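To see the GET/POST encoding split concretely, a hypothetical subclass can capture what ``urlopen`` would receive; ``Echo`` below is illustrative only, not part of the patch:

from urllib3.request import RequestMethods

class Echo(RequestMethods):
    def urlopen(self, method, url, **kw):  # capture instead of sending
        return method, url, kw.get('body')

r = Echo()
# GET: fields are appended to the URL as a query string, no body.
print(r.request('GET', 'http://example.com/', fields={'q': 'netdata'}))
# POST: fields are encoded into the request body instead.
print(r.request('POST', 'http://example.com/', fields={'q': 'netdata'},
                encode_multipart=False))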
diff --git a/python.d/python_modules/urllib3/response.py b/python.d/python_modules/urllib3/response.py
new file mode 100644
index 000000000..408d9996a
--- /dev/null
+++ b/python.d/python_modules/urllib3/response.py
@@ -0,0 +1,622 @@
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+ BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+ ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+ def __init__(self):
+ self._first_try = True
+ self._data = binary_type()
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
+def _get_decoder(mode):
+ if mode == 'gzip':
+ return GzipDecoder()
+
+ return DeflateDecoder()
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, attempts to decode specific content-encoding's based on headers
+ (like 'gzip' and 'deflate') will be skipped and raw data will be used
+ instead.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
+ """
+
+ CONTENT_DECODERS = ['gzip', 'deflate']
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+ strict=0, preload_content=True, decode_content=True,
+ original_response=None, pool=None, connection=None,
+ retries=None, enforce_content_length=False, request_method=None):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+
+ if body and isinstance(body, (basestring, binary_type)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, 'read'):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get('location')
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ @property
+ def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+ the amount of content returned by :meth:``HTTPResponse.read`` if bytes
+        are encoded on the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get('content-length')
+
+ if length is not None and self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning("Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked.")
+ return None
+
+ elif length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(',')])
+ if len(lengths) > 1:
+ raise InvalidHeader("Content-Length contained multiple "
+ "unmatching values (%s)" % length)
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+        Set up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b'')
+ return buf + self._decoder.flush()
+
+ return b''
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+                # there is not yet a clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+            returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ data = None
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (0, None):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2**16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+            How much of the content to read. The generator will return up to as
+            much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, 'strict', 0)
+ resp = ResponseCls(body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw)
+ return resp
+
+ # Backwards-compatibility methods for httplib.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ @property
+ def closed(self):
+ if self._fp is None:
+ return True
+ elif hasattr(self._fp, 'isclosed'):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, 'closed'):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError("The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor")
+
+ def flush(self):
+ if self._fp is not None and hasattr(self._fp, 'flush'):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[:len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ httplib.HTTPResponse object. We do this by testing for the fp
+ attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, 'fp')
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be httplib.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(chunk, decode_content=decode_content,
+ flush_decoder=False)
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # lets defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
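A minimal sketch of the on-demand decoding described above, using an in-memory body so nothing touches the network; the payload is illustrative:

import io
import zlib

from urllib3.response import HTTPResponse

payload = zlib.compress(b'hello netdata')
resp = HTTPResponse(body=io.BytesIO(payload),
                    headers={'content-encoding': 'deflate'},
                    status=200, preload_content=False)
print(resp.read())  # b'hello netdata', decompressed transparently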
diff --git a/python.d/python_modules/urllib3/util/__init__.py b/python.d/python_modules/urllib3/util/__init__.py
new file mode 100644
index 000000000..2f2770b62
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+ SSLContext,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import (
+ current_time,
+ Timeout,
+)
+
+from .retry import Retry
+from .url import (
+ get_host,
+ parse_url,
+ split_first,
+ Url,
+)
+from .wait import (
+ wait_for_read,
+ wait_for_write
+)
+
+__all__ = (
+ 'HAS_SNI',
+ 'IS_PYOPENSSL',
+ 'IS_SECURETRANSPORT',
+ 'SSLContext',
+ 'Retry',
+ 'Timeout',
+ 'Url',
+ 'assert_fingerprint',
+ 'current_time',
+ 'is_connection_dropped',
+ 'is_fp_closed',
+ 'get_host',
+ 'parse_url',
+ 'make_headers',
+ 'resolve_cert_reqs',
+ 'resolve_ssl_version',
+ 'split_first',
+ 'ssl_wrap_socket',
+ 'wait_for_read',
+ 'wait_for_write'
+)
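A quick sketch of the compatibility shim, assuming the vendored layout: the re-exported names are the same objects as their canonical counterparts.

from urllib3.util import Timeout
from urllib3.util.timeout import Timeout as TimeoutDirect

assert Timeout is TimeoutDirect  # both import paths name the same class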
diff --git a/python.d/python_modules/urllib3/util/connection.py b/python.d/python_modules/urllib3/util/connection.py
new file mode 100644
index 000000000..bf699cfd0
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/connection.py
@@ -0,0 +1,130 @@
+from __future__ import absolute_import
+import socket
+from .wait import wait_for_read
+from .selectors import HAS_SELECT, SelectorError
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`httplib.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, 'sock', False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+
+ if not HAS_SELECT:
+ return False
+
+ try:
+ return bool(wait_for_read(sock, timeout=0.0))
+ except SelectorError:
+ return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, socket_options=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
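A sketch of create_connection with per-socket options as documented above; the host, port, and option values are illustrative, and the call needs network access:

import socket

from urllib3.util.connection import create_connection

# Options are applied via setsockopt() before connect().
sock = create_connection(('example.com', 80), timeout=3.0,
                         socket_options=[(socket.IPPROTO_TCP,
                                          socket.TCP_NODELAY, 1)])
sock.close()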
diff --git a/python.d/python_modules/urllib3/util/request.py b/python.d/python_modules/urllib3/util/request.py
new file mode 100644
index 000000000..3ddfcd559
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/request.py
@@ -0,0 +1,118 @@
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+ basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ','.join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers['accept-encoding'] = accept_encoding
+
+ if user_agent:
+ headers['user-agent'] = user_agent
+
+ if keep_alive:
+ headers['connection'] = 'keep-alive'
+
+ if basic_auth:
+ headers['authorization'] = 'Basic ' + \
+ b64encode(b(basic_auth)).decode('utf-8')
+
+ if proxy_basic_auth:
+ headers['proxy-authorization'] = 'Basic ' + \
+ b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+ if disable_cache:
+ headers['cache-control'] = 'no-cache'
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, 'tell', None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+    :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, 'seek', None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect/retry.")
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError("Unable to record file position for rewinding "
+ "request body during a redirect/retry.")
+ else:
+ raise ValueError("body_pos must be of type integer, "
+ "instead it was %s." % type(body_pos))
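A sketch of the make_headers shortcuts, using only the arguments documented above; the credentials are placeholders:

from urllib3.util.request import make_headers

headers = make_headers(keep_alive=True, accept_encoding=True,
                       basic_auth='user:secret')
print(headers)  # key order may vary
# {'connection': 'keep-alive', 'accept-encoding': 'gzip,deflate',
#  'authorization': 'Basic dXNlcjpzZWNyZXQ='}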
diff --git a/python.d/python_modules/urllib3/util/response.py b/python.d/python_modules/urllib3/util/response.py
new file mode 100644
index 000000000..67cf730ab
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/response.py
@@ -0,0 +1,81 @@
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+        raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+    Checks whether the request that produced this response was a HEAD request.
+    Handles the quirks of AppEngine.
+
+    :param response:
+    :type response: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
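is_fp_closed probes several object flavors in turn; a tiny sketch with an in-memory file:

import io

from urllib3.util.response import is_fp_closed

fp = io.BytesIO(b'payload')
assert not is_fp_closed(fp)  # falls through to the `closed` attribute
fp.close()
assert is_fp_closed(fp)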
diff --git a/python.d/python_modules/urllib3/util/retry.py b/python.d/python_modules/urllib3/util/retry.py
new file mode 100644
index 000000000..c603cb490
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/retry.py
@@ -0,0 +1,401 @@
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ MaxRetryError,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseError,
+ InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+ "status", "redirect_location"])
+
+
+class Retry(object):
+ """ Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts. It's a good idea to set this to some sensibly-high value to
+ account for unexpected edge cases and avoid infinite retry loops.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param iterable method_whitelist:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``method_whitelist``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ """
+
+ DEFAULT_METHOD_WHITELIST = frozenset([
+ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+ method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+ backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+ history=None, respect_retry_after_header=True):
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.method_whitelist = method_whitelist
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+ method_whitelist=self.method_whitelist,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ )
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
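+
+    # A hypothetical walk-through of the conversion above:
+    #
+    #   Retry.from_int(None)              -> Retry.DEFAULT (total=3)
+    #   Retry.from_int(5)                 -> Retry(total=5, redirect=None)
+    #   Retry.from_int(5, redirect=False) -> Retry(total=5, redirect=0)
+    #                                        with raise_on_redirect=False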
+
+ def get_backoff_time(self):
+ """ Formula for computing the current backoff
+
+ :rtype: float
+ """
+        # We want to consider only the last consecutive errors sequence (ignoring redirects).
+ consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+ reversed(self.history))))
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ retry_date = time.mktime(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
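+
+    # Both Retry-After forms are handled above, e.g. (hypothetical values):
+    #
+    #   parse_retry_after("120")                           -> 120 seconds
+    #   parse_retry_after("Fri, 31 Dec 1999 23:59:59 GMT") -> seconds until
+    #                                                         that date (0 if past)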
+
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """ Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """ Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """ Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """ Checks if a given HTTP method should be retried upon, depending if
+ it is included on the method whitelist.
+ """
+ if self.method_whitelist and method.upper() not in self.method_whitelist:
+ return False
+
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """ Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon on the presence of the aforementioned header)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (self.total and self.respect_retry_after_header and
+ has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(self, method=None, url=None, response=None, error=None,
+ _pool=None, _stacktrace=None):
+ """ Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ cause = 'unknown'
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = 'too many redirects'
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(
+ status_code=response.status)
+ status = response.status
+
+ history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+ new_retry = self.new(
+ total=total,
+ connect=connect, read=read, redirect=redirect, status=status_count,
+ history=history)
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+ 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+ cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
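+
+# A hypothetical sketch of how a connection pool drives this object; the
+# `do_request` helper and the loop below are invented for illustration:
+#
+#     retries = Retry(total=3, backoff_factor=0.5, status_forcelist={503})
+#     while True:
+#         try:
+#             response = do_request()           # hypothetical helper
+#         except ConnectTimeoutError as err:
+#             retries = retries.increment(method='GET', url=url, error=err)
+#             retries.sleep()                   # exponential backoff
+#             continue
+#         if retries.is_retry('GET', response.status):
+#             retries = retries.increment(method='GET', url=url,
+#                                         response=response)
+#             retries.sleep(response)           # honours Retry-After
+#             continue
+#         break
+#
+# increment() raises MaxRetryError once the relevant counter is exhausted,
+# which is what terminates the loop on persistent failures.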
diff --git a/python.d/python_modules/urllib3/util/selectors.py b/python.d/python_modules/urllib3/util/selectors.py
new file mode 100644
index 000000000..d75cb266b
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,581 @@
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple, Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError):  # Python < 3.3
+ monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+    given an integer, will simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all system calls automatically restart
+ and recalculate their timeouts. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+            # Aren't we thankful for the Python 3.x exception rework?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+                            raise OSError(errno.ETIMEDOUT, "Connection timed out")
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+        even if it is closed. It is also used by _SelectorMapping.
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as e: # Platform-specific: Windows.
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+                # This can occur if the fd was closed since registration.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+                    # select.epoll.poll() has a resolution of 1 millisecond
+                    # but luckily takes seconds so we don't need a wrapper
+                    # like PollSelector does; we only round the timeout up
+                    # to the nearest millisecond.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 to ensure that select can be called
+            # with no file descriptors registered. Otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._kqueue.control, True,
+ None, max_events, timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+
+if not hasattr(select, 'select'): # Platform-specific: AppEngine
+ HAS_SELECT = False
+
+
+def _can_allocate(struct):
+ """ Checks that select structs can be allocated by the underlying
+ operating system, not just advertised by the select module. We don't
+ check select() because we'll be hopeful that most platforms that
+    don't have it available will not advertise it. (i.e. GAE) """
+ try:
+ # select.poll() objects won't fail until used.
+ if struct == 'poll':
+ p = select.poll()
+ p.poll(0)
+
+ # All others will fail on allocation.
+ else:
+ getattr(select, struct)().close()
+ return True
+    except (OSError, AttributeError):
+ return False
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll > poll > select. Devpoll not supported. (See above)
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+def DefaultSelector():
+ """ This function serves as a first call for DefaultSelector to
+ detect if the select module is being monkey-patched incorrectly
+ by eventlet, greenlet, and preserve proper behavior. """
+ global _DEFAULT_SELECTOR
+ if _DEFAULT_SELECTOR is None:
+ if _can_allocate('kqueue'):
+ _DEFAULT_SELECTOR = KqueueSelector
+ elif _can_allocate('epoll'):
+ _DEFAULT_SELECTOR = EpollSelector
+ elif _can_allocate('poll'):
+ _DEFAULT_SELECTOR = PollSelector
+ elif hasattr(select, 'select'):
+ _DEFAULT_SELECTOR = SelectSelector
+ else: # Platform-specific: AppEngine
+ raise ValueError('Platform does not have a selector')
+ return _DEFAULT_SELECTOR()
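+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A minimal smoke-test sketch, not part of the vendored module: wait for
+    # one end of a socketpair to become readable after writing to the other
+    # end. Assumes socket.socketpair() is available (POSIX; Windows needs
+    # Python 3.5+).
+    rsock, wsock = socket.socketpair()
+    try:
+        with DefaultSelector() as sel:
+            sel.register(rsock, EVENT_READ)
+            wsock.send(b"ping")
+            for key, events in sel.select(timeout=1.0):
+                print("readable:", key.fileobj is rsock,
+                      bool(events & EVENT_READ))
+    finally:
+        rsock.close()
+        wsock.close()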
diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/python.d/python_modules/urllib3/util/ssl_.py
new file mode 100644
index 000000000..33d428ed8
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/ssl_.py
@@ -0,0 +1,337 @@
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for l, r in zip(bytearray(a), bytearray(b)):
+ result |= l ^ r
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+ _const_compare_digest_backport)
+
+
+try: # Test for SSL features
+ import ssl
+ from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+
+try:
+ from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+ 'ECDH+AESGCM',
+ 'ECDH+CHACHA20',
+ 'DH+AESGCM',
+ 'DH+CHACHA20',
+ 'ECDH+AES256',
+ 'DH+AES256',
+ 'ECDH+AES128',
+ 'DH+AES',
+ 'RSA+AESGCM',
+ 'RSA+AES',
+ '!aNULL',
+ '!eNULL',
+ '!MD5',
+])
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+ import sys
+
+ class SSLContext(object): # Platform-specific: Python 2 & 3.1
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
+
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ if not self.supports_set_ciphers:
+ raise TypeError(
+ 'Your version of Python does not support setting '
+ 'a custom cipher suite. Please upgrade to Python '
+ '2.7, 3.2, or later if you need this functionality.'
+ )
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. You can upgrade to a newer '
+ 'version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ InsecurePlatformWarning
+ )
+ kwargs = {
+ 'keyfile': self.keyfile,
+ 'certfile': self.certfile,
+ 'ca_certs': self.ca_certs,
+ 'cert_reqs': self.verify_mode,
+ 'ssl_version': self.protocol,
+ 'server_side': server_side,
+ }
+ if self.supports_set_ciphers: # Platform-specific: Python 2.7+
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+ else: # Platform-specific: Python 2.6
+ return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(':', '').lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+    # We need encode() here for py32; works on py2 and py33.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+ .format(fingerprint, hexlify(cert_digest)))
+
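+# A hypothetical usage sketch for assert_fingerprint(): pin a peer
+# certificate by a SHA-256 digest computed out-of-band. `sock` is assumed
+# to be an established SSL socket.
+#
+#     der_cert = sock.getpeercert(binary_form=True)  # DER bytes
+#     expected = sha256(der_cert).hexdigest()        # 64 hex digits
+#     assert_fingerprint(der_cert, expected)         # raises SSLError on mismatch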
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_NONE`.
+ If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_NONE
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'CERT_' + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+    Like :func:`resolve_cert_reqs`, but resolves the ``ssl_version`` argument
+    to a numeric :mod:`ssl` protocol constant.
+    Defaults to :data:`ssl.PROTOCOL_SSLv23`.
+ """
+ if candidate is None:
+ return PROTOCOL_SSLv23
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'PROTOCOL_' + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+ options=None, ciphers=None):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+        Whether to require certificate verification. This defaults to
+        ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+
+ context.options |= options
+
+ if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ context.verify_mode = cert_reqs
+ if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+ return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+ ca_certs=None, server_hostname=None,
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support. This is not
+ supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs,
+ ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir)
+ except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
+ raise SSLError(e)
+ # Py33 raises FileNotFoundError which subclasses OSError
+ # These are not equivalent unless we check the errno attribute
+ except OSError as e: # Platform-specific: Python 3.3 and beyond
+ if e.errno == errno.ENOENT:
+ raise SSLError(e)
+ raise
+ elif getattr(context, 'load_default_certs', None) is not None:
+        # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ if certfile:
+ context.load_cert_chain(certfile, keyfile)
+ if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
+ return context.wrap_socket(sock, server_hostname=server_hostname)
+
+ warnings.warn(
+ 'An HTTPS request has been made, but the SNI (Subject Name '
+ 'Indication) extension to TLS is not available on this platform. '
+ 'This may cause the server to present an incorrect TLS '
+ 'certificate, which can cause validation failures. You can upgrade to '
+ 'a newer version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ SNIMissingWarning
+ )
+ return context.wrap_socket(sock)
diff --git a/python.d/python_modules/urllib3/util/timeout.py b/python.d/python_modules/urllib3/util/timeout.py
new file mode 100644
index 000000000..cec817e6e
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/timeout.py
@@ -0,0 +1,242 @@
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """ Timeout configuration.
+
+ Timeouts can be defined as a default for a pool::
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``::
+
+ no_timeout = Timeout(connect=None, read=None)
+        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: integer, float, or None
+
+ :param connect:
+ The maximum amount of time to wait for a connection attempt to a server
+ to succeed. Omitting the parameter will default the connect timeout to
+ the system default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: integer, float, or None
+
+ :param read:
+ The maximum amount of time to wait between consecutive
+ read operations for a response from the server. Omitting
+ the parameter will default the read timeout to the system
+ default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: integer, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, 'connect')
+ self._read = self._validate_timeout(read, 'read')
+ self.total = self._validate_timeout(total, 'total')
+ self._start_connect = None
+
+ def __str__(self):
+ return '%s(connect=%r, read=%r, total=%r)' % (
+ type(self).__name__, self._connect, self._read, self.total)
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """ Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is
+            used in error messages.
+        :return: The validated and cast version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError("Timeout cannot be a boolean value. It must "
+ "be an int, float or None.")
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ try:
+ if value <= 0:
+ raise ValueError("Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value))
+ except TypeError: # Python 3
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """ Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+        connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """ Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read,
+ total=self.total)
+
+ def start_connect(self):
+ """ Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError("Can't get connect duration for timer "
+ "that has not started.")
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """ Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """ Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (self.total is not None and
+ self.total is not self.DEFAULT_TIMEOUT and
+ self._read is not None and
+ self._read is not self.DEFAULT_TIMEOUT):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(),
+ self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
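+
+
+# A hypothetical walk-through of the two properties above:
+#
+#     t = Timeout(total=10.0, connect=3.0, read=8.0)
+#     t.connect_timeout            # -> 3.0, min(connect, total)
+#     t.start_connect()
+#     # ... suppose connecting took 2.5 seconds ...
+#     t.read_timeout               # -> 7.5, min(total - elapsed, read)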
diff --git a/python.d/python_modules/urllib3/util/url.py b/python.d/python_modules/urllib3/util/url.py
new file mode 100644
index 000000000..6b6f9968d
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/url.py
@@ -0,0 +1,230 @@
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+ """
+ Datastructure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+ __slots__ = ()
+
+ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+ query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host and scheme in NORMALIZABLE_SCHEMES:
+ host = host.lower()
+ return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+ query, fragment)
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or '/'
+
+ if self.query is not None:
+ uri += '?' + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return '%s:%d' % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+        returned url may not be exactly the same as the url passed to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = ''
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + '://'
+ if auth is not None:
+ url += auth + '@'
+ if host is not None:
+ url += host
+ if port is not None:
+ url += ':' + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += '?' + query
+ if fragment is not None:
+ url += '#' + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+    Scales linearly with the number of delims. Not ideal for a large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, '', None
+
+ return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+
+ # While this code has overlap with stdlib's urlparse, it is much
+ # simplified for our needs and less annoying.
+    # Additionally, this implementation does silly things to be optimal
+ # on CPython.
+
+ if not url:
+ # Empty
+ return Url()
+
+ scheme = None
+ auth = None
+ host = None
+ port = None
+ path = None
+ fragment = None
+ query = None
+
+ # Scheme
+ if '://' in url:
+ scheme, url = url.split('://', 1)
+
+ # Find the earliest Authority Terminator
+ # (http://tools.ietf.org/html/rfc3986#section-3.2)
+ url, path_, delim = split_first(url, ['/', '?', '#'])
+
+ if delim:
+ # Reassemble the path
+ path = delim + path_
+
+ # Auth
+ if '@' in url:
+ # Last '@' denotes end of auth part
+ auth, url = url.rsplit('@', 1)
+
+ # IPv6
+ if url and url[0] == '[':
+ host, url = url.split(']', 1)
+ host += ']'
+
+ # Port
+ if ':' in url:
+ _host, port = url.split(':', 1)
+
+ if not host:
+ host = _host
+
+ if port:
+ # If given, ports must be integers. No whitespace, no plus or
+ # minus prefixes, no non-integer digits such as ^2 (superscript).
+ if not port.isdigit():
+ raise LocationParseError(url)
+ try:
+ port = int(port)
+ except ValueError:
+ raise LocationParseError(url)
+ else:
+ # Blank ports are cool, too. (rfc3986#section-3.2.3)
+ port = None
+
+ elif not host and url:
+ host = url
+
+ if not path:
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+ # Fragment
+ if '#' in path:
+ path, fragment = path.split('#', 1)
+
+ # Query
+ if '?' in path:
+ path, query = path.split('?', 1)
+
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or 'http', p.hostname, p.port
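+
+
+# For example:
+#
+#     get_host('https://example.com:8443/mail')  # ('https', 'example.com', 8443)
+#     get_host('example.com/mail')               # ('http', 'example.com', None)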
diff --git a/python.d/python_modules/urllib3/util/wait.py b/python.d/python_modules/urllib3/util/wait.py
new file mode 100644
index 000000000..cb396e508
--- /dev/null
+++ b/python.d/python_modules/urllib3/util/wait.py
@@ -0,0 +1,40 @@
+from .selectors import (
+ HAS_SELECT,
+ DefaultSelector,
+ EVENT_READ,
+ EVENT_WRITE
+)
+
+
+def _wait_for_io_events(socks, events, timeout=None):
+ """ Waits for IO events to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be interacted with immediately. """
+ if not HAS_SELECT:
+ raise ValueError('Platform does not have a selector')
+ if not isinstance(socks, list):
+ # Probably just a single socket.
+ if hasattr(socks, "fileno"):
+ socks = [socks]
+ # Otherwise it might be a non-list iterable.
+ else:
+ socks = list(socks)
+ with DefaultSelector() as selector:
+ for sock in socks:
+ selector.register(sock, events)
+ return [key[0].fileobj for key in
+ selector.select(timeout) if key[1] & events]
+
+
+def wait_for_read(socks, timeout=None):
+ """ Waits for reading to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be read from immediately. """
+ return _wait_for_io_events(socks, EVENT_READ, timeout)
+
+
+def wait_for_write(socks, timeout=None):
+ """ Waits for writing to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be written to immediately. """
+ return _wait_for_io_events(socks, EVENT_WRITE, timeout)
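+
+
+# A usage sketch (assumes ``sock`` is a connected socket object):
+#
+#     if wait_for_read(sock, timeout=5.0):
+#         data = sock.recv(4096)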
diff --git a/python.d/rabbitmq.chart.py b/python.d/rabbitmq.chart.py
index 15a6a80f7..763912039 100644
--- a/python.d/rabbitmq.chart.py
+++ b/python.d/rabbitmq.chart.py
@@ -2,40 +2,41 @@
# Description: rabbitmq netdata python.d module
# Author: l2isbad
-from base import UrlService
+from collections import namedtuple
+from json import loads
from socket import gethostbyname, gaierror
+from threading import Thread
try:
from queue import Queue
except ImportError:
from Queue import Queue
-from threading import Thread
-from collections import namedtuple
-from json import loads
+
+from base import UrlService
# default module values (can be overridden per job in `config`)
update_every = 1
priority = 60000
retries = 60
-METHODS = namedtuple('METHODS', ['get_data_function', 'url', 'stats'])
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-NODE_STATS = [('fd_used', None),
- ('mem_used', None),
- ('sockets_used', None),
- ('proc_used', None),
- ('disk_free', None)
+NODE_STATS = ['fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free'
]
-OVERVIEW_STATS = [('object_totals.channels', None),
- ('object_totals.consumers', None),
- ('object_totals.connections', None),
- ('object_totals.queues', None),
- ('object_totals.exchanges', None),
- ('queue_totals.messages_ready', None),
- ('queue_totals.messages_unacknowledged', None),
- ('message_stats.ack', None),
- ('message_stats.redeliver', None),
- ('message_stats.deliver', None),
- ('message_stats.publish', None)
+OVERVIEW_STATS = ['object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish'
]
ORDER = ['queued_messages', 'message_rates', 'global_counts',
'file_descriptors', 'socket_descriptors', 'erlang_processes', 'memory', 'disk_space']
@@ -75,27 +76,27 @@ CHARTS = {
'options': [None, 'Global Counts', 'counts', 'overview',
'rabbitmq.global_counts', 'line'],
'lines': [
- ['channels', None, 'absolute'],
- ['consumers', None, 'absolute'],
- ['connections', None, 'absolute'],
- ['queues', None, 'absolute'],
- ['exchanges', None, 'absolute']
+ ['object_totals_channels', 'channels', 'absolute'],
+ ['object_totals_consumers', 'consumers', 'absolute'],
+ ['object_totals_connections', 'connections', 'absolute'],
+ ['object_totals_queues', 'queues', 'absolute'],
+ ['object_totals_exchanges', 'exchanges', 'absolute']
]},
'queued_messages': {
'options': [None, 'Queued Messages', 'messages', 'overview',
'rabbitmq.queued_messages', 'stacked'],
'lines': [
- ['messages_ready', 'ready', 'absolute'],
- ['messages_unacknowledged', 'unacknowledged', 'absolute']
+ ['queue_totals_messages_ready', 'ready', 'absolute'],
+ ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
]},
'message_rates': {
'options': [None, 'Message Rates', 'messages/s', 'overview',
'rabbitmq.message_rates', 'stacked'],
'lines': [
- ['ack', None, 'incremental'],
- ['redeliver', None, 'incremental'],
- ['deliver', None, 'incremental'],
- ['publish', None, 'incremental']
+ ['message_stats_ack', 'ack', 'incremental'],
+ ['message_stats_redeliver', 'redeliver', 'incremental'],
+ ['message_stats_deliver', 'deliver', 'incremental'],
+ ['message_stats_publish', 'publish', 'incremental']
]}
}
@@ -123,22 +124,19 @@ class Service(UrlService):
return False
# Add handlers (auth, self signed cert accept)
- url = '%s://%s:%s/api' % (self.scheme, self.host, self.port)
- self.opener = self._build_opener(url=url)
- if not self.opener:
- return False
+ self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
+ host=self.host,
+ port=self.port)
# Add methods
- api_node = url + '/nodes'
- api_overview = url + '/overview'
- self.methods = [METHODS(get_data_function=self._get_overview_stats, url=api_node, stats=NODE_STATS),
- METHODS(get_data_function=self._get_overview_stats, url=api_overview, stats=OVERVIEW_STATS)]
-
- result = self._get_data()
- if not result:
- self.error('_get_data() returned no data')
- return False
- self._data_from_check = result
- return True
+ api_node = self.url + '/nodes'
+ api_overview = self.url + '/overview'
+ self.methods = [METHODS(get_data=self._get_overview_stats,
+ url=api_node,
+ stats=NODE_STATS),
+ METHODS(get_data=self._get_overview_stats,
+ url=api_overview,
+ stats=OVERVIEW_STATS)]
+ return UrlService.check(self)
def _get_data(self):
threads = list()
@@ -146,7 +144,8 @@ class Service(UrlService):
result = dict()
for method in self.methods:
- th = Thread(target=method.get_data_function, args=(queue, method.url, method.stats))
+ th = Thread(target=method.get_data,
+ args=(queue, method.url, method.stats))
th.start()
threads.append(th)
@@ -169,19 +168,19 @@ class Service(UrlService):
data = loads(raw_data)
data = data[0] if isinstance(data, list) else data
- to_netdata = fetch_data_(raw_data=data, metrics_list=stats)
+ to_netdata = fetch_data(raw_data=data, metrics=stats)
return queue.put(to_netdata)
-def fetch_data_(raw_data, metrics_list):
- to_netdata = dict()
- for metric, new_name in metrics_list:
+def fetch_data(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
value = raw_data
- for key in metric.split('.'):
- try:
- value = value[key]
- except (KeyError, TypeError):
- break
- if not isinstance(value, dict):
- to_netdata[new_name or key] = value
- return to_netdata
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
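The rewritten fetch_data() above flattens dotted metric paths into underscore-joined dimension ids and silently skips any branch missing from the API response. A minimal stand-alone sketch of the same behaviour, using a made-up payload rather than real RabbitMQ API output:

    def fetch_data(raw_data, metrics):
        data = dict()
        for metric in metrics:
            value = raw_data
            keys = metric.split('.')
            try:
                for key in keys:
                    value = value[key]
            except KeyError:
                continue
            data['_'.join(keys)] = value
        return data

    overview = {'object_totals': {'channels': 4, 'queues': 2}}
    print(fetch_data(overview, ['object_totals.channels', 'message_stats.ack']))
    # -> {'object_totals_channels': 4}  ('message_stats.ack' is absent, so skipped)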
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
index 4bc1d41f9..7c3c43f5a 100644
--- a/python.d/redis.chart.py
+++ b/python.d/redis.chart.py
@@ -68,7 +68,8 @@ CHARTS = {
['connected_slaves', 'connected', 'absolute']
]},
'persistence': {
- 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence', 'redis.rdb_changes', 'line'],
+ 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence',
+ 'redis.rdb_changes', 'line'],
'lines': [
['rdb_changes_since_last_save', 'changes', 'absolute']
]}
@@ -78,34 +79,30 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "INFO\r\n"
self.order = ORDER
self.definitions = CHARTS
self._keep_alive = True
self.chart_name = ""
- self.passwd = None
- self.port = 6379
- if 'port' in configuration:
- self.port = configuration['port']
- if 'pass' in configuration:
- self.passwd = configuration['pass']
- if 'host' in configuration:
- self.host = configuration['host']
- if 'socket' in configuration:
- self.unix_socket = configuration['socket']
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 6379)
+ self.unix_socket = self.configuration.get('socket')
+ password = self.configuration.get('pass', str())
+ self.requests = dict(request='INFO\r\n'.encode(),
+ password=' '.join(['AUTH', password, '\r\n']).encode() if password else None)
+ self.request = self.requests['request']
def _get_data(self):
"""
Get data from socket
:return: dict
"""
- if self.passwd:
- self.request = "AUTH " + self.passwd + "\r\n"
+ if self.requests['password']:
+ self.request = self.requests['password']
raw = self._get_raw_data().strip()
if raw != "+OK":
self.error("invalid password")
return None
- self.request = "INFO\r\n"
+ self.request = self.requests['request']
response = self._get_raw_data()
if response is None:
# error has already been logged
@@ -117,7 +114,7 @@ class Service(SocketService):
self.error("response is invalid/empty")
return None
- data = {}
+ data = dict()
for line in parsed:
if len(line) < 5 or line[0] == '$' or line[0] == '#':
continue
@@ -140,8 +137,9 @@ class Service(SocketService):
return None
try:
- data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
- except:
+ data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits'])
+ + int(data['keyspace_misses']))
+ except (KeyError, ZeroDivisionError, TypeError):
data['hit_rate'] = 0
return data
@@ -172,7 +170,6 @@ class Service(SocketService):
Parse configuration, check if redis is available, and dynamically create chart lines data
:return: boolean
"""
- self._parse_config()
if self.name == "":
self.name = "local"
self.chart_name += "_" + self.name
@@ -183,5 +180,4 @@ class Service(SocketService):
for name in data:
if name.startswith('db'):
self.definitions['keys']['lines'].append([name, None, 'absolute'])
-
return True
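The request handling above reduces to: when a password is configured, send AUTH first and require "+OK" before sending INFO. The guarded hit-rate calculation at the end can be sketched in isolation (the sample counters are hypothetical):

    def hit_rate(info):
        try:
            hits = int(info['keyspace_hits'])
            misses = int(info['keyspace_misses'])
            return hits * 100 / (hits + misses)
        except (KeyError, ZeroDivisionError, TypeError):
            return 0  # fresh instances report 0/0; missing fields stay safe

    print(hit_rate({'keyspace_hits': '90', 'keyspace_misses': '10'}))  # 90 (90.0 on Python 3)
    print(hit_rate({}))                                                # 0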
diff --git a/python.d/tomcat.chart.py b/python.d/tomcat.chart.py
index c20f85e1e..05547236a 100644
--- a/python.d/tomcat.chart.py
+++ b/python.d/tomcat.chart.py
@@ -3,12 +3,7 @@
# Author: Pawel Krupa (paulfantom)
from base import UrlService
-from re import compile
-
-try:
- from urlparse import urlparse
-except ImportError:
- from urllib.parse import urlparse
+import xml.etree.ElementTree as ET
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -16,61 +11,142 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['accesses', 'volume', 'threads', 'jvm']
+ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
CHARTS = {
'accesses': {
'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"],
'lines': [
- ["requestCount", 'accesses', 'incremental']
+ ["requestCount", 'accesses', 'incremental'],
+ ["errorCount", 'errors', 'incremental'],
+ ]},
+ 'bandwidth': {
+ 'options': [None, "Bandwidth", "KB/s", "statistics", "tomcat.bandwidth", "area"],
+ 'lines': [
+ ["bytesSent", 'sent', 'incremental', 1, 1024],
+ ["bytesReceived", 'received', 'incremental', 1, 1024],
]},
- 'volume': {
- 'options': [None, "Volume", "KB/s", "volume", "tomcat.volume", "area"],
+ 'processing_time': {
+ 'options': [None, "processing time", "seconds", "statistics", "tomcat.processing_time", "area"],
'lines': [
- ["bytesSent", 'volume', 'incremental', 1, 1024]
+ ["processingTime", 'processing time', 'incremental', 1, 1000]
]},
'threads': {
- 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "line"],
+ 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "area"],
'lines': [
["currentThreadCount", 'current', "absolute"],
["currentThreadsBusy", 'busy', "absolute"]
]},
'jvm': {
- 'options': [None, "JVM Free Memory", "MB", "statistics", "tomcat.jvm", "area"],
+ 'options': [None, "JVM Memory Pool Usage", "MB", "memory", "tomcat.jvm", "stacked"],
+ 'lines': [
+ ["free", 'free', "absolute", 1, 1048576],
+ ["eden_used", 'eden', "absolute", 1, 1048576],
+ ["survivor_used", 'survivor', "absolute", 1, 1048576],
+ ["tenured_used", 'tenured', "absolute", 1, 1048576],
+ ["code_cache_used", 'code cache', "absolute", 1, 1048576],
+ ["compressed_used", 'compressed', "absolute", 1, 1048576],
+ ["metaspace_used", 'metaspace', "absolute", 1, 1048576],
+ ]},
+ 'jvm_eden': {
+ 'options': [None, "Eden Memory Usage", "MB", "memory", "tomcat.jvm_eden", "area"],
'lines': [
- ["free", None, "absolute", 1, 1048576]
- ]}
+ ["eden_used", 'used', "absolute", 1, 1048576],
+ ["eden_commited", 'commited', "absolute", 1, 1048576],
+ ["eden_max", 'max', "absolute", 1, 1048576]
+ ]},
+ 'jvm_survivor': {
+ 'options': [None, "Survivor Memory Usage", "MB", "memory", "tomcat.jvm_survivor", "area"],
+ 'lines': [
+ ["survivor_used", 'used', "absolute", 1, 1048576],
+ ["survivor_commited", 'commited', "absolute", 1, 1048576],
+ ["survivor_max", 'max', "absolute", 1, 1048576]
+ ]},
+ 'jvm_tenured': {
+ 'options': [None, "Tenured Memory Usage", "MB", "memory", "tomcat.jvm_tenured", "area"],
+ 'lines': [
+ ["tenured_used", 'used', "absolute", 1, 1048576],
+ ["tenured_commited", 'commited', "absolute", 1, 1048576],
+ ["tenured_max", 'max', "absolute", 1, 1048576]
+ ]},
}
-
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
+ self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER
self.definitions = CHARTS
- def check(self):
- netloc = urlparse(self.url).netloc.rpartition(':')
- if netloc[1] == ':': port = netloc[2]
- else: port = 80
-
- self.regex_jvm = compile(r'<jvm>.*?</jvm>')
- self.regex_connector = compile(r'[a-z-]+%s.*?/connector' % port)
- self.regex = compile(r'([\w]+)=\\?[\'\"](\d+)\\?[\'\"]')
-
- return UrlService.check(self)
-
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
- data = self._get_raw_data()
- if data:
- jvm = self.regex_jvm.findall(data) or ['']
- connector = self.regex_connector.findall(data) or ['']
- data = dict(self.regex.findall(''.join([jvm[0], connector[0]])))
-
- return data or None
+ data = None
+ raw_data = self._get_raw_data()
+ if raw_data:
+ xml = None
+ try:
+ xml = ET.fromstring(raw_data)
+ except ET.ParseError:
+                self.debug('%s is not a valid XML page. Please add "?XML=true" to the Tomcat status page URL.' % self.url)
+ return None
+ data = {}
+
+ jvm = xml.find('jvm')
+
+ connector = None
+ if self.connector_name:
+ for conn in xml.findall('connector'):
+ if conn.get('name') == self.connector_name:
+ connector = conn
+ break
+ else:
+ connector = xml.find('connector')
+ memory = jvm.find('memory')
+ data['free'] = memory.get('free')
+ data['total'] = memory.get('total')
+
+ for pool in jvm.findall('memorypool'):
+ name = pool.get('name')
+ if name == 'Eden Space':
+ data['eden_used'] = pool.get('usageUsed')
+ data['eden_commited'] = pool.get('usageCommitted')
+ data['eden_max'] = pool.get('usageMax')
+ elif name == 'Survivor Space':
+ data['survivor_used'] = pool.get('usageUsed')
+ data['survivor_commited'] = pool.get('usageCommitted')
+ data['survivor_max'] = pool.get('usageMax')
+ elif name == 'Tenured Gen':
+ data['tenured_used'] = pool.get('usageUsed')
+ data['tenured_commited'] = pool.get('usageCommitted')
+ data['tenured_max'] = pool.get('usageMax')
+ elif name == 'Code Cache':
+ data['code_cache_used'] = pool.get('usageUsed')
+ data['code_cache_commited'] = pool.get('usageCommitted')
+ data['code_cache_max'] = pool.get('usageMax')
+ elif name == 'Compressed':
+ data['compressed_used'] = pool.get('usageUsed')
+ data['compressed_commited'] = pool.get('usageCommitted')
+ data['compressed_max'] = pool.get('usageMax')
+ elif name == 'Metaspace':
+ data['metaspace_used'] = pool.get('usageUsed')
+ data['metaspace_commited'] = pool.get('usageCommitted')
+ data['metaspace_max'] = pool.get('usageMax')
+
+ if connector:
+ thread_info = connector.find('threadInfo')
+ data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+ data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+ request_info = connector.find('requestInfo')
+ data['processingTime'] = request_info.get('processingTime')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
+
+ return data or None
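The module now walks the status document with ElementTree instead of port-derived regexes, and the new connector_name option selects one connector when several are exposed. A stand-alone sketch against a simplified stand-in for the manager/status?XML=true payload (the sample XML is not real Tomcat output):

    import xml.etree.ElementTree as ET

    SAMPLE = ('<status><jvm><memory free="1048576" total="4194304"/></jvm>'
              '<connector name="http-8080"><requestInfo requestCount="9"/></connector>'
              '</status>')
    xml = ET.fromstring(SAMPLE)
    print(xml.find('jvm').find('memory').get('free'))                     # 1048576
    print(xml.find('connector').find('requestInfo').get('requestCount'))  # 9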
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
index 564c9f1dd..a5359bc4d 100644
--- a/python.d/web_log.chart.py
+++ b/python.d/web_log.chart.py
@@ -1,17 +1,20 @@
# -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: l2isbad
-import re
+
import bisect
-from os import access, R_OK
-from os.path import getsize
+import re
+
from collections import namedtuple, defaultdict
from copy import deepcopy
+from os import access, R_OK
+from os.path import getsize
try:
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
+
from base import LogService
import msg
@@ -475,7 +478,6 @@ class Web(Mixin):
unique_current = set()
timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
- ip_address_counter = {'unique_cur_ip': 0}
for line in filtered_data:
match = self.storage['regex'].search(line)
if match:
@@ -526,13 +528,14 @@ class Web(Mixin):
get_timings(timings=timings['resp_time_upstream'],
time=self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])))
# requests per ip proto
- proto = 'ipv4' if '.' in match_dict['address'] else 'ipv6'
+ proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
self.data['req_' + proto] += 1
# unique clients ips
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match_dict['address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
+ if self.conf.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
if match_dict['address'] not in unique_current:
self.data['unique_cur_' + proto] += 1
unique_current.add(match_dict['address'])
@@ -558,14 +561,14 @@ class Web(Mixin):
"""
# REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
# 5. Bytes sent 6. Response length 7. Response process time
- default = re.compile(r'(?P<address>[\da-f.:]+)'
+ default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<method>[A-Z]+)'
r' (?P<url>[^ ]+)'
r' [A-Z]+/(?P<http_version>\d\.\d)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)')
- apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<method>[A-Z]+)'
r' (?P<url>[^ ]+)'
r' [A-Z]+/(?P<http_version>\d\.\d)"'
@@ -574,7 +577,7 @@ class Web(Mixin):
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+) ')
- apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<method>[A-Z]+)'
r' (?P<url>[^ ]+)'
r' [A-Z]+/(?P<http_version>\d\.\d)"'
@@ -690,14 +693,14 @@ class Web(Mixin):
if match_dict is None:
return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
' (you need to use ?P<subgroup_name>)')
- mandatory_dict = {'address': r'[\da-f.:]+',
+ mandatory_dict = {'address': r'[\da-f.:]+|localhost',
'code': r'[1-9]\d{2}',
'method': r'[A-Z]+',
'bytes_sent': r'\d+|-'}
optional_dict = {'resp_length': r'\d+',
'resp_time': r'[\d.]+',
'resp_time_upstream': r'[\d.-]+',
- 'http_version': r'\d\.\d'}
+ 'http_version': r'\d(\.\d)?'}
mandatory_values = set(mandatory_dict) - set(match_dict)
if mandatory_values:
@@ -853,6 +856,8 @@ class Squid(Mixin):
'chart': 'squid_mime_type',
'func_dim_id': lambda v: v.split('/')[0],
'func_dim': None}}
+ if not self.conf.get('all_time', True):
+ self.order.remove('squid_clients_all')
return True
def get_data(self, raw_data=None):
@@ -883,10 +888,11 @@ class Squid(Mixin):
proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
# unique clients ips
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match['client_address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
+ if self.conf.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
if match['client_address'] not in unique_ip:
self.data['unique_' + proto] += 1
@@ -934,18 +940,21 @@ class Squid(Mixin):
:return:
"""
if code not in self.data:
- self.add_new_dimension(dimension_id=code, chart_key='squid_code')
+ self.add_new_dimension(dimension_id=code,
+ chart_key='squid_code')
self.data[code] += 1
- if '_' not in code:
- return
+
for tag in code.split('_'):
try:
chart_key = SQUID_CODES[tag]
except KeyError:
continue
- if tag not in self.data:
- self.add_new_dimension(dimension_id=tag, chart_key=chart_key)
- self.data[tag] += 1
+ dimension_id = '_'.join(['code_detailed', tag])
+ if dimension_id not in self.data:
+ self.add_new_dimension(dimension_id=dimension_id,
+ dimension=tag,
+ chart_key=chart_key)
+ self.data[dimension_id] += 1
def get_timings(timings, time):
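The protocol check above was inverted: testing for a dot to detect IPv4 misclassifies IPv4-mapped IPv6 addresses such as ::ffff:127.0.0.1, so the colon test is now authoritative. In isolation:

    def ip_proto(address):
        return 'ipv6' if ':' in address else 'ipv4'

    assert ip_proto('127.0.0.1') == 'ipv4'
    assert ip_proto('::1') == 'ipv6'
    assert ip_proto('::ffff:127.0.0.1') == 'ipv6'  # the old dot test said 'ipv4'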
diff --git a/src/Makefile.am b/src/Makefile.am
index 601d3204f..feddc326d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -37,6 +37,10 @@ if ENABLE_PLUGIN_FREEIPMI
plugins_PROGRAMS += freeipmi.plugin
endif
+if ENABLE_PLUGIN_CGROUP_NETWORK
+plugins_PROGRAMS += cgroup-network
+endif
+
netdata_SOURCES = \
adaptive_resortable_list.c \
adaptive_resortable_list.h \
@@ -117,6 +121,8 @@ netdata_SOURCES = \
rrdset.c \
rrdsetvar.c \
rrdvar.c \
+ signals.c \
+ signals.h \
simple_pattern.c \
simple_pattern.h \
socket.c \
@@ -129,7 +135,6 @@ netdata_SOURCES = \
storage_number.h \
sys_devices_system_edac_mc.c \
sys_devices_system_node.c \
- sys_fs_cgroup.c \
unit_test.c \
unit_test.h \
url.c url.h \
@@ -199,6 +204,7 @@ netdata_SOURCES += \
proc_vmstat.c \
proc_uptime.c \
sys_kernel_mm_ksm.c \
+ sys_fs_cgroup.c \
$(NULL)
endif
endif
@@ -244,3 +250,17 @@ freeipmi_plugin_SOURCES = \
freeipmi_plugin_LDADD = \
$(OPTIONAL_IPMIMONITORING_LIBS) \
$(NULL)
+
+cgroup_network_SOURCES = \
+ cgroup-network.c \
+ clocks.c clocks.h \
+ common.c common.h \
+ inlined.h \
+ log.c log.h \
+ procfile.c procfile.h \
+ $(NULL)
+
+cgroup_network_LDADD = \
+ $(OPTIONAL_MATH_LIBS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(NULL)
diff --git a/src/Makefile.in b/src/Makefile.in
index 3ce869b0d..bc73c7125 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -80,10 +80,11 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
sbin_PROGRAMS = netdata$(EXEEXT)
-plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
+plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3)
@ENABLE_PLUGIN_APPS_TRUE@am__append_1 = apps.plugin
@ENABLE_PLUGIN_FREEIPMI_TRUE@am__append_2 = freeipmi.plugin
-@FREEBSD_TRUE@am__append_3 = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__append_3 = cgroup-network
+@FREEBSD_TRUE@am__append_4 = \
@FREEBSD_TRUE@ plugin_freebsd.c \
@FREEBSD_TRUE@ plugin_freebsd.h \
@FREEBSD_TRUE@ freebsd_sysctl.c \
@@ -96,7 +97,7 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_TRUE@ freebsd_ipfw.c \
@FREEBSD_TRUE@ $(NULL)
-@FREEBSD_FALSE@@MACOS_TRUE@am__append_4 = \
+@FREEBSD_FALSE@@MACOS_TRUE@am__append_5 = \
@FREEBSD_FALSE@@MACOS_TRUE@ plugin_macos.c \
@FREEBSD_FALSE@@MACOS_TRUE@ plugin_macos.h \
@FREEBSD_FALSE@@MACOS_TRUE@ macos_sysctl.c \
@@ -104,7 +105,7 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_FALSE@@MACOS_TRUE@ macos_fw.c \
@FREEBSD_FALSE@@MACOS_TRUE@ $(NULL)
-@FREEBSD_FALSE@@MACOS_FALSE@am__append_5 = \
+@FREEBSD_FALSE@@MACOS_FALSE@am__append_6 = \
@FREEBSD_FALSE@@MACOS_FALSE@ ipc.c ipc.h \
@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc.c \
@FREEBSD_FALSE@@MACOS_FALSE@ plugin_proc.h \
@@ -133,9 +134,10 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_uptime.c \
@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.c \
+@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_cgroup.c \
@FREEBSD_FALSE@@MACOS_FALSE@ $(NULL)
-@FREEBSD_TRUE@am__append_6 = \
+@FREEBSD_TRUE@am__append_7 = \
@FREEBSD_TRUE@ plugin_freebsd.h \
@FREEBSD_TRUE@ $(NULL)
@@ -160,6 +162,8 @@ CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
@ENABLE_PLUGIN_APPS_TRUE@am__EXEEXT_1 = apps.plugin$(EXEEXT)
@ENABLE_PLUGIN_FREEIPMI_TRUE@am__EXEEXT_2 = freeipmi.plugin$(EXEEXT)
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__EXEEXT_3 = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ cgroup-network$(EXEEXT)
am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" \
"$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" \
"$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"
@@ -174,6 +178,11 @@ am_apps_plugin_OBJECTS = apps_plugin.$(OBJEXT) avl.$(OBJEXT) \
apps_plugin_OBJECTS = $(am_apps_plugin_OBJECTS)
am__DEPENDENCIES_1 =
apps_plugin_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
+am_cgroup_network_OBJECTS = cgroup-network.$(OBJEXT) clocks.$(OBJEXT) \
+ common.$(OBJEXT) log.$(OBJEXT) procfile.$(OBJEXT)
+cgroup_network_OBJECTS = $(am_cgroup_network_OBJECTS)
+cgroup_network_DEPENDENCIES = $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1)
am_freeipmi_plugin_OBJECTS = freeipmi_plugin.$(OBJEXT) \
clocks.$(OBJEXT) common.$(OBJEXT) log.$(OBJEXT) \
procfile.$(OBJEXT)
@@ -197,20 +206,19 @@ am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
registry_url.c registry_url.h rrd.c rrd.h rrd2json.c \
rrd2json.h rrd2json_api_old.c rrd2json_api_old.h rrdcalc.c \
rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c rrdhost.c \
- rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \
- simple_pattern.c simple_pattern.h socket.c socket.h \
+ rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c signals.c \
+ signals.h simple_pattern.c simple_pattern.h socket.c socket.h \
statistical.c statistical.h statsd.c statsd.h storage_number.c \
storage_number.h sys_devices_system_edac_mc.c \
- sys_devices_system_node.c sys_fs_cgroup.c unit_test.c \
- unit_test.h url.c url.h web_api_old.c web_api_old.h \
- web_api_v1.c web_api_v1.h web_buffer.c web_buffer.h \
- web_buffer_svg.c web_buffer_svg.h web_client.c web_client.h \
- web_server.c web_server.h plugin_freebsd.c plugin_freebsd.h \
- freebsd_sysctl.c freebsd_getmntinfo.c freebsd_getifaddrs.c \
- freebsd_devstat.c zfs_common.c zfs_common.h \
- freebsd_kstat_zfs.c freebsd_ipfw.c plugin_macos.c \
- plugin_macos.h macos_sysctl.c macos_mach_smi.c macos_fw.c \
- ipc.c ipc.h plugin_proc.c plugin_proc.h \
+ sys_devices_system_node.c unit_test.c unit_test.h url.c url.h \
+ web_api_old.c web_api_old.h web_api_v1.c web_api_v1.h \
+ web_buffer.c web_buffer.h web_buffer_svg.c web_buffer_svg.h \
+ web_client.c web_client.h web_server.c web_server.h \
+ plugin_freebsd.c plugin_freebsd.h freebsd_sysctl.c \
+ freebsd_getmntinfo.c freebsd_getifaddrs.c freebsd_devstat.c \
+ zfs_common.c zfs_common.h freebsd_kstat_zfs.c freebsd_ipfw.c \
+ plugin_macos.c plugin_macos.h macos_sysctl.c macos_mach_smi.c \
+ macos_fw.c ipc.c ipc.h plugin_proc.c plugin_proc.h \
plugin_proc_diskspace.c plugin_proc_diskspace.h \
proc_diskstats.c proc_interrupts.c proc_softirqs.c \
proc_loadavg.c proc_meminfo.c proc_net_dev.c \
@@ -219,7 +227,7 @@ am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
proc_net_softnet_stat.c proc_net_stat_conntrack.c \
proc_net_stat_synproxy.c proc_spl_kstat_zfs.c proc_stat.c \
proc_sys_kernel_random_entropy_avail.c proc_vmstat.c \
- proc_uptime.c sys_kernel_mm_ksm.c
+ proc_uptime.c sys_kernel_mm_ksm.c sys_fs_cgroup.c
@FREEBSD_TRUE@am__objects_2 = plugin_freebsd.$(OBJEXT) \
@FREEBSD_TRUE@ freebsd_sysctl.$(OBJEXT) \
@FREEBSD_TRUE@ freebsd_getmntinfo.$(OBJEXT) \
@@ -255,7 +263,8 @@ am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_sys_kernel_random_entropy_avail.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_uptime.$(OBJEXT) \
-@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.$(OBJEXT)
+@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.$(OBJEXT) \
+@FREEBSD_FALSE@@MACOS_FALSE@ sys_fs_cgroup.$(OBJEXT)
am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \
appconfig.$(OBJEXT) avl.$(OBJEXT) backend_prometheus.$(OBJEXT) \
backends.$(OBJEXT) clocks.$(OBJEXT) common.$(OBJEXT) \
@@ -275,15 +284,14 @@ am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \
rrdcalc.$(OBJEXT) rrdcalctemplate.$(OBJEXT) rrddim.$(OBJEXT) \
rrddimvar.$(OBJEXT) rrdfamily.$(OBJEXT) rrdhost.$(OBJEXT) \
rrdpush.$(OBJEXT) rrdset.$(OBJEXT) rrdsetvar.$(OBJEXT) \
- rrdvar.$(OBJEXT) simple_pattern.$(OBJEXT) socket.$(OBJEXT) \
- statistical.$(OBJEXT) statsd.$(OBJEXT) \
+ rrdvar.$(OBJEXT) signals.$(OBJEXT) simple_pattern.$(OBJEXT) \
+ socket.$(OBJEXT) statistical.$(OBJEXT) statsd.$(OBJEXT) \
storage_number.$(OBJEXT) sys_devices_system_edac_mc.$(OBJEXT) \
- sys_devices_system_node.$(OBJEXT) sys_fs_cgroup.$(OBJEXT) \
- unit_test.$(OBJEXT) url.$(OBJEXT) web_api_old.$(OBJEXT) \
- web_api_v1.$(OBJEXT) web_buffer.$(OBJEXT) \
- web_buffer_svg.$(OBJEXT) web_client.$(OBJEXT) \
- web_server.$(OBJEXT) $(am__objects_2) $(am__objects_3) \
- $(am__objects_4)
+ sys_devices_system_node.$(OBJEXT) unit_test.$(OBJEXT) \
+ url.$(OBJEXT) web_api_old.$(OBJEXT) web_api_v1.$(OBJEXT) \
+ web_buffer.$(OBJEXT) web_buffer_svg.$(OBJEXT) \
+ web_client.$(OBJEXT) web_server.$(OBJEXT) $(am__objects_2) \
+ $(am__objects_3) $(am__objects_4)
netdata_OBJECTS = $(am_netdata_OBJECTS)
netdata_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
@@ -315,10 +323,11 @@ AM_V_CCLD = $(am__v_CCLD_@AM_V@)
am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
am__v_CCLD_0 = @echo " CCLD " $@;
am__v_CCLD_1 =
-SOURCES = $(apps_plugin_SOURCES) $(freeipmi_plugin_SOURCES) \
- $(netdata_SOURCES)
+SOURCES = $(apps_plugin_SOURCES) $(cgroup_network_SOURCES) \
+ $(freeipmi_plugin_SOURCES) $(netdata_SOURCES)
DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \
- $(freeipmi_plugin_SOURCES) $(am__netdata_SOURCES_DIST)
+ $(cgroup_network_SOURCES) $(freeipmi_plugin_SOURCES) \
+ $(am__netdata_SOURCES_DIST)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
@@ -563,16 +572,15 @@ netdata_SOURCES = adaptive_resortable_list.c \
registry_url.c registry_url.h rrd.c rrd.h rrd2json.c \
rrd2json.h rrd2json_api_old.c rrd2json_api_old.h rrdcalc.c \
rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c rrdhost.c \
- rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \
- simple_pattern.c simple_pattern.h socket.c socket.h \
+ rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c signals.c \
+ signals.h simple_pattern.c simple_pattern.h socket.c socket.h \
statistical.c statistical.h statsd.c statsd.h storage_number.c \
storage_number.h sys_devices_system_edac_mc.c \
- sys_devices_system_node.c sys_fs_cgroup.c unit_test.c \
- unit_test.h url.c url.h web_api_old.c web_api_old.h \
- web_api_v1.c web_api_v1.h web_buffer.c web_buffer.h \
- web_buffer_svg.c web_buffer_svg.h web_client.c web_client.h \
- web_server.c web_server.h $(NULL) $(am__append_3) \
- $(am__append_4) $(am__append_5)
+ sys_devices_system_node.c unit_test.c unit_test.h url.c url.h \
+ web_api_old.c web_api_old.h web_api_v1.c web_api_v1.h \
+ web_buffer.c web_buffer.h web_buffer_svg.c web_buffer_svg.h \
+ web_client.c web_client.h web_server.c web_server.h $(NULL) \
+ $(am__append_4) $(am__append_5) $(am__append_6)
netdata_LDADD = \
$(OPTIONAL_MATH_LIBS) \
$(OPTIONAL_NFACCT_LIBS) \
@@ -582,7 +590,7 @@ netdata_LDADD = \
apps_plugin_SOURCES = apps_plugin.c avl.c avl.h clocks.c clocks.h \
common.c common.h inlined.h log.c log.h procfile.c procfile.h \
- web_buffer.c web_buffer.h $(NULL) $(am__append_6)
+ web_buffer.c web_buffer.h $(NULL) $(am__append_7)
apps_plugin_LDADD = \
$(OPTIONAL_MATH_LIBS) \
$(OPTIONAL_LIBCAP_LIBS) \
@@ -601,6 +609,20 @@ freeipmi_plugin_LDADD = \
$(OPTIONAL_IPMIMONITORING_LIBS) \
$(NULL)
+cgroup_network_SOURCES = \
+ cgroup-network.c \
+ clocks.c clocks.h \
+ common.c common.h \
+ inlined.h \
+ log.c log.h \
+ procfile.c procfile.h \
+ $(NULL)
+
+cgroup_network_LDADD = \
+ $(OPTIONAL_MATH_LIBS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(NULL)
+
all: all-am
.SUFFIXES:
@@ -724,6 +746,10 @@ apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA
@rm -f apps.plugin$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS)
+cgroup-network$(EXEEXT): $(cgroup_network_OBJECTS) $(cgroup_network_DEPENDENCIES) $(EXTRA_cgroup_network_DEPENDENCIES)
+ @rm -f cgroup-network$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(cgroup_network_OBJECTS) $(cgroup_network_LDADD) $(LIBS)
+
freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES)
@rm -f freeipmi.plugin$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS)
@@ -744,6 +770,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/avl.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backend_prometheus.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backends.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cgroup-network.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clocks.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/daemon.Po@am__quote@
@@ -820,6 +847,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdset.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdsetvar.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdvar.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signals.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple_pattern.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/socket.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/statistical.Po@am__quote@
diff --git a/src/appconfig.c b/src/appconfig.c
index 91c4c5c54..2c7721b8c 100644
--- a/src/appconfig.c
+++ b/src/appconfig.c
@@ -317,7 +317,7 @@ int appconfig_get_boolean(struct config *root, const char *section, const char *
s = appconfig_get(root, section, name, s);
if(!s) return value;
- if(!strcmp(s, "yes") || !strcmp(s, "auto") || !strcmp(s, "on demand")) return 1;
+ if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) return 1;
return 0;
}
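appconfig_get_boolean() now matches the usual truthy spellings case-insensitively. The equivalent predicate, sketched in Python:

    def config_boolean(s, default=False):
        if not s:
            return default
        return s.lower() in ('yes', 'true', 'on', 'auto', 'on demand')

    assert config_boolean('YES') and config_boolean('On Demand')
    assert not config_boolean('no')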
diff --git a/src/apps_plugin.c b/src/apps_plugin.c
index ecb6aaeac..c0eb5c083 100644
--- a/src/apps_plugin.c
+++ b/src/apps_plugin.c
@@ -3197,7 +3197,7 @@ static void parse_args(int argc, char **argv)
}
}
- if(strcmp("version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
printf("apps.plugin %s\n", VERSION);
exit(0);
}
diff --git a/src/cgroup-network.c b/src/cgroup-network.c
new file mode 100644
index 000000000..8894d60ee
--- /dev/null
+++ b/src/cgroup-network.c
@@ -0,0 +1,340 @@
+#include "common.h"
+
+#ifdef HAVE_SETNS
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#endif
+#include <sched.h>
+#endif
+
+// ----------------------------------------------------------------------------
+// callback required by fatal()
+
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+struct iface {
+ const char *device;
+ uint32_t hash;
+
+ unsigned int ifindex;
+ unsigned int iflink;
+
+ struct iface *next;
+};
+
+unsigned int read_iface_iflink(const char *prefix, const char *iface) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix?prefix:"", iface);
+
+ unsigned long long iflink = 0;
+ int ret = read_single_number_file(filename, &iflink);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)iflink;
+}
+
+unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix?prefix:"", iface);
+
+ unsigned long long ifindex = 0;
+ int ret = read_single_number_file(filename, &ifindex);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)ifindex;
+}
+
+struct iface *read_proc_net_dev(const char *prefix) {
+ procfile *ff = NULL;
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", prefix?prefix:"", "/proc/net/dev");
+ ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ error("Cannot read file '%s'", filename);
+ return NULL;
+ }
+
+ size_t lines = procfile_lines(ff), l;
+ struct iface *root = NULL;
+ for(l = 2; l < lines ;l++) {
+ if (unlikely(procfile_linewords(ff, l) < 1)) continue;
+
+ struct iface *t = callocz(1, sizeof(struct iface));
+ t->device = strdupz(procfile_lineword(ff, l, 0));
+ t->hash = simple_hash(t->device);
+ t->ifindex = read_iface_ifindex(prefix, t->device);
+ t->iflink = read_iface_iflink(prefix, t->device);
+ t->next = root;
+ root = t;
+ }
+
+ procfile_close(ff);
+
+ return root;
+}
+
+int iface_is_eligible(struct iface *iface) {
+ if(iface->iflink != iface->ifindex)
+ return 1;
+
+ return 0;
+}
+
+int eligible_ifaces(struct iface *root) {
+ int eligible = 0;
+
+ struct iface *t;
+ for(t = root; t ; t = t->next)
+ if(iface_is_eligible(t))
+ eligible++;
+
+ return eligible;
+}
+
+static void continue_as_child(void) {
+ pid_t child = fork();
+ int status;
+ pid_t ret;
+
+ if (child < 0)
+ error("fork() failed");
+
+ /* Only the child returns */
+ if (child == 0)
+ return;
+
+ for (;;) {
+ ret = waitpid(child, &status, WUNTRACED);
+ if ((ret == child) && (WIFSTOPPED(status))) {
+ /* The child suspended so suspend us as well */
+ kill(getpid(), SIGSTOP);
+ kill(child, SIGCONT);
+ } else {
+ break;
+ }
+ }
+
+ /* Return the child's exit code if possible */
+ if (WIFEXITED(status)) {
+ exit(WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ kill(getpid(), WTERMSIG(status));
+ }
+
+ exit(EXIT_FAILURE);
+}
+
+int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix?prefix:"", (int)pid, ns);
+ int fd = open(filename, O_RDONLY);
+
+ if(fd == -1)
+ error("Cannot open file '%s'", filename);
+
+ return fd;
+}
+
+static struct ns {
+ int nstype;
+ int fd;
+ int status;
+ const char *name;
+ const char *path;
+} all_ns[] = {
+ // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
+ // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
+ // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
+ // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
+ { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
+ { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
+ { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
+
+ // terminator
+ { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
+};
+
+int switch_namespace(const char *prefix, pid_t pid) {
+#ifdef HAVE_SETNS
+
+ int i;
+ for(i = 0; all_ns[i].name ; i++)
+ all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
+
+ int root_fd = proc_pid_fd(prefix, "root", pid);
+ int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
+
+ setgroups(0, NULL);
+
+    // 2 passes - a technique found in the nsenter source code;
+    // this is related to the CLONE_NEWUSER functionality
+
+ // FIXME: this code cannot switch user namespace
+ // Fortunately, we don't need it.
+
+ int pass, errors = 0;
+ for(pass = 0; pass < 2 ;pass++) {
+ for(i = 0; all_ns[i].name ; i++) {
+ if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
+ if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
+ if(pass == 1) {
+ all_ns[i].status = 0;
+ error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
+ errors++;
+ }
+ }
+ else
+ all_ns[i].status = 1;
+ }
+ }
+ }
+
+ setgroups(0, NULL);
+
+ if(root_fd != -1) {
+ if(fchdir(root_fd) < 0)
+ error("Cannot fchdir() to pid %d root directory", (int)pid);
+
+ if(chroot(".") < 0)
+ error("Cannot chroot() to pid %d root directory", (int)pid);
+
+ close(root_fd);
+ }
+
+ if(cwd_fd != -1) {
+ if(fchdir(cwd_fd) < 0)
+ error("Cannot fchdir() to pid %d current working directory", (int)pid);
+
+ close(cwd_fd);
+ }
+
+ int do_fork = 0;
+ for(i = 0; all_ns[i].name ; i++)
+ if(all_ns[i].fd != -1) {
+
+ // CLONE_NEWPID requires a fork() to become effective
+ if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
+ do_fork = 1;
+
+ close(all_ns[i].fd);
+ }
+
+ if(do_fork)
+ continue_as_child();
+
+ return 0;
+
+#else
+
+ errno = ENOSYS;
+ error("setns() is missing on this system.");
+ return 1;
+
+#endif
+}
+
+pid_t read_pid_from_cgroup(const char *path) {
+ char buffer[FILENAME_MAX + 1];
+
+ snprintfz(buffer, FILENAME_MAX, "%s/cgroup.procs", path);
+ FILE *fp = fopen(buffer, "r");
+ if(!fp) {
+ error("Cannot read file '%s'.", buffer);
+ snprintfz(buffer, FILENAME_MAX, "%s/tasks", path);
+ fp = fopen(buffer, "r");
+ }
+
+ if(!fp) {
+ error("Cannot read file '%s'.", buffer);
+ return 0;
+ }
+
+ pid_t pid = 0;
+ char *s;
+ while((s = fgets(buffer, FILENAME_MAX, fp))) {
+ pid = atoi(s);
+ if(pid > 0) break;
+ }
+
+ fclose(fp);
+ return pid;
+}
+
+void usage(void) {
+ fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
+ exit(1);
+}
+
+int main(int argc, char **argv) {
+ pid_t pid = 0;
+
+ program_name = argv[0];
+ program_version = VERSION;
+ error_log_syslog = 0;
+
+ if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
+ fprintf(stderr, "cgroup-network %s\n", VERSION);
+ exit(0);
+ }
+
+ if(argc != 3)
+ usage();
+
+ if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) {
+ pid = atoi(argv[2]);
+ }
+ else if(!strcmp(argv[1], "--cgroup")) {
+ pid = read_pid_from_cgroup(argv[2]);
+ }
+ else
+ usage();
+
+ if(pid <= 0)
+ fatal("Invalid pid %d", (int)pid);
+
+ struct iface *host, *cgroup, *h, *c;
+ const char *prefix = getenv("NETDATA_HOST_PREFIX");
+
+ host = read_proc_net_dev(prefix);
+ if(!host)
+ fatal("cannot read host interface list.");
+
+ if(!eligible_ifaces(host))
+ fatal("there are no double-linked host interfaces available.");
+
+ if(switch_namespace(prefix, pid))
+ fatal("cannot switch to the namespace of pid %u", (unsigned int)pid);
+
+ cgroup = read_proc_net_dev(NULL);
+ if(!cgroup)
+ fatal("cannot read cgroup interface list.");
+
+ if(!eligible_ifaces(cgroup))
+ fatal("there are not double-linked cgroup interfaces available.");
+
+ int found = 0;
+ for(h = host; h ; h = h->next) {
+ if(iface_is_eligible(h)) {
+ for (c = cgroup; c; c = c->next) {
+ if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
+ printf("%s %s\n", h->device, c->device);
+ found++;
+ }
+ }
+ }
+ }
+
+ if(!found)
+ return 1;
+
+ return 0;
+}
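cgroup-network relies on the veth convention that the two ends of a pair reference each other: a host device h and a container device c belong to the same pair when h.ifindex == c.iflink and h.iflink == c.ifindex, which is exactly the nested loop at the end of main(). The same matching rule, sketched with made-up index numbers:

    host   = [('veth1a2b3c', 7, 6)]  # (device, ifindex, iflink)
    cgroup = [('eth0',       6, 7)]

    for hdev, hidx, hlink in host:
        for cdev, cidx, clink in cgroup:
            if hidx == clink and hlink == cidx:
                print(hdev, cdev)    # veth1a2b3c eth0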
diff --git a/src/common.c b/src/common.c
index aa75c198d..5a953672d 100644
--- a/src/common.c
+++ b/src/common.c
@@ -227,9 +227,18 @@ void json_escape_string(char *dst, const char *src, size_t size) {
}
void json_fix_string(char *s) {
- for( ; *s ;s++) {
- if(unlikely(*s == '\\')) *s = '/';
- else if(unlikely(*s == '"')) *s = '\'';
+ unsigned char c;
+ while((c = (unsigned char)*s)) {
+ if(unlikely(c == '\\'))
+ *s++ = '/';
+ else if(unlikely(c == '"'))
+ *s++ = '\'';
+ else if(unlikely(isspace(c) || iscntrl(c)))
+ *s++ = ' ';
+ else if(unlikely(!isprint(c) || c > 127))
+ *s++ = '_';
+ else
+ s++;
}
}
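json_fix_string() now sanitizes a buffer in place for safe embedding in JSON: backslashes become slashes, double quotes become single quotes, whitespace and control characters collapse to a space, and any remaining non-printable or non-ASCII byte becomes an underscore. The same policy, sketched:

    def json_fix_string(s):
        out = []
        for ch in s:
            code = ord(ch)
            if ch == '\\':
                out.append('/')
            elif ch == '"':
                out.append("'")
            elif ch.isspace() or code < 32:
                out.append(' ')
            elif not (32 <= code < 127):
                out.append('_')
            else:
                out.append(ch)
        return ''.join(out)

    print(json_fix_string('say "hi"\tpath\\file'))  # say 'hi' path/file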
diff --git a/src/common.h b/src/common.h
index efeebf16f..51d2bba55 100644
--- a/src/common.h
+++ b/src/common.h
@@ -80,6 +80,7 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <sys/un.h>
#include <time.h>
#include <unistd.h>
#include <uuid/uuid.h>
@@ -215,6 +216,7 @@
#include "web_client.h"
#include "web_server.h"
#include "registry.h"
+#include "signals.h"
#include "daemon.h"
#include "main.h"
#include "unit_test.h"
@@ -309,4 +311,6 @@ extern const char *program_version;
#endif
#endif
+#define BITS_IN_A_KILOBIT 1000
+
#endif /* NETDATA_COMMON_H */
diff --git a/src/daemon.c b/src/daemon.c
index bc02446e0..5c5333a36 100644
--- a/src/daemon.c
+++ b/src/daemon.c
@@ -3,45 +3,6 @@
char pidfile[FILENAME_MAX + 1] = "";
-void sig_handler_exit(int signo)
-{
- if(signo) {
- error_log_limit_unlimited();
- error("Received signal %d. Exiting...", signo);
- netdata_exit = 1;
- }
-}
-
-void sig_handler_logrotate(int signo)
-{
- if(signo) {
- error_log_limit_unlimited();
- info("Received signal %d to re-open the log files", signo);
- reopen_all_log_files();
- error_log_limit_reset();
- }
-}
-
-void sig_handler_save(int signo)
-{
- if(signo) {
- error_log_limit_unlimited();
- info("Received signal %d to save the database...", signo);
- rrdhost_save_all();
- error_log_limit_reset();
- }
-}
-
-void sig_handler_reload_health(int signo)
-{
- if(signo) {
- error_log_limit_unlimited();
- info("Received signal %d to reload health configuration...", signo);
- health_reload();
- error_log_limit_reset();
- }
-}
-
static void chown_open_file(int fd, uid_t uid, gid_t gid) {
if(fd == -1) return;
@@ -167,34 +128,70 @@ int become_user(const char *username, int pid_fd) {
#endif
static void oom_score_adj(void) {
- char buf[10 + 1];
- snprintfz(buf, 10, "%d", OOM_SCORE_ADJ_MAX);
+ char buf[30 + 1];
+ long long int old_score, wanted_score = OOM_SCORE_ADJ_MAX, final_score = 0;
+
+ // read the existing score
+ if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) {
+ error("Out-Of-Memory (OOM) score setting is not supported on this system.");
+ return;
+ }
+
+ if(old_score != 0)
+ wanted_score = old_score;
// check the environment
char *s = getenv("OOMScoreAdjust");
- if(!s || !*s) s = buf;
+ if(!s || !*s) {
+ snprintfz(buf, 30, "%d", (int)wanted_score);
+ s = buf;
+ }
// check netdata.conf configuration
s = config_get(CONFIG_SECTION_GLOBAL, "OOM score", s);
- if(!s || !*s) s = buf;
+ if(s && *s && (isdigit(*s) || *s == '-' || *s == '+'))
+ wanted_score = atoll(s);
+ else {
+ info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score);
+ return;
+ }
- if(!isdigit(*s) && *s != '-' && *s != '+') {
- info("Out-Of-Memory score not changed due to setting: '%s'", s);
+ if(wanted_score < OOM_SCORE_ADJ_MIN) {
+ error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN);
+ wanted_score = OOM_SCORE_ADJ_MIN;
+ }
+
+ if(wanted_score > OOM_SCORE_ADJ_MAX) {
+ error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX);
+ wanted_score = OOM_SCORE_ADJ_MAX;
+ }
+
+ if(old_score == wanted_score) {
+ info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score);
return;
}
- int done = 0;
+ int written = 0;
int fd = open("/proc/self/oom_score_adj", O_WRONLY);
if(fd != -1) {
- ssize_t len = strlen(s);
- if(len > 0 && write(fd, buf, (size_t)len) == len) done = 1;
+ snprintfz(buf, 30, "%d", (int)wanted_score);
+ ssize_t len = strlen(buf);
+ if(len > 0 && write(fd, buf, (size_t)len) == len) written = 1;
close(fd);
- }
- if(!done)
- error("Cannot adjust my Out-Of-Memory score to '%s'.", s);
+ if(written) {
+ if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score))
+ error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score);
+ else if(final_score == wanted_score)
+ info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score);
+ else
+ error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score);
+ }
+ else
+ error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score);
+ }
else
- info("Adjusted my Out-Of-Memory score to '%s'.", s);
+ error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing.");
}
static void process_nice_level(void) {
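The rewritten oom_score_adj() reads the current score from /proc/self/oom_score_adj, lets the OOMScoreAdjust environment variable and the "OOM score" option override it, clamps the result to the kernel's valid range, and re-reads the file to verify the write. The clamping core, sketched (OOM_SCORE_ADJ_MIN/MAX are assumed here to be the kernel limits, -1000 and 1000):

    OOM_SCORE_ADJ_MIN, OOM_SCORE_ADJ_MAX = -1000, 1000

    def clamp_oom_score(wanted):
        return max(OOM_SCORE_ADJ_MIN, min(OOM_SCORE_ADJ_MAX, wanted))

    assert clamp_oom_score(99999) == 1000
    assert clamp_oom_score(-99999) == -1000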
diff --git a/src/daemon.h b/src/daemon.h
index b193602d6..150d74e3a 100644
--- a/src/daemon.h
+++ b/src/daemon.h
@@ -1,11 +1,6 @@
#ifndef NETDATA_DAEMON_H
#define NETDATA_DAEMON_H 1
-extern void sig_handler_exit(int signo);
-extern void sig_handler_save(int signo);
-extern void sig_handler_logrotate(int signo);
-extern void sig_handler_reload_health(int signo);
-
extern int become_user(const char *username, int pid_fd);
extern int become_daemon(int dont_fork, const char *user);
diff --git a/src/freebsd_getifaddrs.c b/src/freebsd_getifaddrs.c
index 7355fac9e..1a4448bd3 100644
--- a/src/freebsd_getifaddrs.c
+++ b/src/freebsd_getifaddrs.c
@@ -2,7 +2,7 @@
#include <ifaddrs.h>
-struct network_interface {
+struct cgroup_network_interface {
char *name;
uint32_t hash;
size_t len;
@@ -41,14 +41,14 @@ struct network_interface {
RRDSET *st_events;
RRDDIM *rd_events_coll;
- struct network_interface *next;
+ struct cgroup_network_interface *next;
};
-static struct network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
+static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
static size_t network_interfaces_added = 0, network_interfaces_found = 0;
-static void network_interface_free(struct network_interface *ifm) {
+static void network_interface_free(struct cgroup_network_interface *ifm) {
if (likely(ifm->st_bandwidth))
rrdset_is_obsolete(ifm->st_bandwidth);
if (likely(ifm->st_packets))
@@ -68,7 +68,7 @@ static void network_interface_free(struct network_interface *ifm) {
static void network_interfaces_cleanup() {
if (likely(network_interfaces_found == network_interfaces_added)) return;
- struct network_interface *ifm = network_interfaces_root, *last = NULL;
+ struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL;
while(ifm) {
if (unlikely(!ifm->updated)) {
// info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT");
@@ -76,7 +76,7 @@ static void network_interfaces_cleanup() {
if (network_interfaces_last_used == ifm)
network_interfaces_last_used = last;
- struct network_interface *t = ifm;
+ struct cgroup_network_interface *t = ifm;
if (ifm == network_interfaces_root || !last)
network_interfaces_root = ifm = ifm->next;
@@ -95,8 +95,8 @@ static void network_interfaces_cleanup() {
}
}
-static struct network_interface *get_network_interface(const char *name) {
- struct network_interface *ifm;
+static struct cgroup_network_interface *get_network_interface(const char *name) {
+ struct cgroup_network_interface *ifm;
uint32_t hash = simple_hash(name);
@@ -117,7 +117,7 @@ static struct network_interface *get_network_interface(const char *name) {
}
// create a new one
- ifm = callocz(1, sizeof(struct network_interface));
+ ifm = callocz(1, sizeof(struct cgroup_network_interface));
ifm->name = strdupz(name);
ifm->hash = simple_hash(ifm->name);
ifm->len = strlen(ifm->name);
@@ -125,7 +125,7 @@ static struct network_interface *get_network_interface(const char *name) {
// link it to the end
if (network_interfaces_root) {
- struct network_interface *e;
+ struct cgroup_network_interface *e;
for(e = network_interfaces_root; e->next ; e = e->next) ;
e->next = ifm;
}
@@ -233,8 +233,8 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_AREA
);
- rd_in = rrddim_add(st, "InOctets", "received", 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutOctets", "sent", -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
} else
rrdset_next(st);
@@ -270,8 +270,8 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_AREA
);
- rd_in = rrddim_add(st, "received", NULL, 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "sent", NULL, -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
} else
rrdset_next(st);
@@ -288,7 +288,7 @@ int do_getifaddrs(int update_every, usec_t dt) {
if (ifa->ifa_addr->sa_family != AF_LINK)
continue;
- struct network_interface *ifm = get_network_interface(ifa->ifa_name);
+ struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name);
ifm->updated = 1;
network_interfaces_found++;
@@ -338,10 +338,8 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_AREA
);
- ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
} else
rrdset_next(ifm->st_bandwidth);
diff --git a/src/freebsd_sysctl.c b/src/freebsd_sysctl.c
index d2f0eaa82..a87b872d9 100644
--- a/src/freebsd_sysctl.c
+++ b/src/freebsd_sysctl.c
@@ -373,6 +373,76 @@ int do_kern_cp_times(int update_every, usec_t dt) {
}
// --------------------------------------------------------------------------------------------------------------------
+// dev.cpu.temperature
+
+int do_dev_cpu_temperature(int update_every, usec_t dt) {
+ (void)dt;
+
+ int i;
+ static int *mib = NULL;
+ static int *pcpu_temperature = NULL;
+ static int old_number_of_cpus = 0;
+ char char_mib[MAX_INT_DIGITS + 21];
+ char char_rd[MAX_INT_DIGITS + 9];
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ pcpu_temperature = reallocz(pcpu_temperature, sizeof(int) * number_of_cpus);
+ mib = reallocz(mib, sizeof(int) * number_of_cpus * 4);
+ if (unlikely(number_of_cpus > old_number_of_cpus))
+ memset(&mib[old_number_of_cpus * 4], 0, sizeof(RRDDIM) * (number_of_cpus - old_number_of_cpus));
+ }
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!(mib[i * 4])))
+ sprintf(char_mib, "dev.cpu.%d.temperature", i);
+ if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) {
+ error("DISABLED: cpu.temperature chart");
+ error("DISABLED: dev.cpu.temperature module");
+ return 1;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st;
+ static RRDDIM **rd_pcpu_temperature;
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+        rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM *) * number_of_cpus);
+        if (unlikely(number_of_cpus > old_number_of_cpus))
+            memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM *) * (number_of_cpus - old_number_of_cpus));
+ }
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("cpu",
+ "temperature",
+ NULL,
+ "temperature",
+ "cpu.temperatute",
+ "Core temperature",
+ "degree",
+ 1050,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ } else rrdset_next(st);
+
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!rd_pcpu_temperature[i])) {
+ sprintf(char_rd, "cpu%d.temp", i);
+ rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15));
+ }
+
+ rrdset_done(st);
+
+ old_number_of_cpus = number_of_cpus;
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
// hw.intrcnt
int do_hw_intcnt(int update_every, usec_t dt) {
@@ -1985,7 +2055,7 @@ int do_net_inet_icmp_stats(int update_every, usec_t dt) {
static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL;
if (unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messsages",
+ st = rrdset_create_localhost("ipv4", "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messages",
"packets/s", 2604, update_every, RRDSET_TYPE_LINE);
rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
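dev.cpu.N.temperature reports tenths of a Kelvin, which is why the chart above stores (raw / 10 - 273.15) as degrees Celsius:

    def decikelvin_to_celsius(raw):
        return raw / 10.0 - 273.15

    print(decikelvin_to_celsius(3031))  # ~29.95 degrees C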
diff --git a/src/freeipmi_plugin.c b/src/freeipmi_plugin.c
index 146268a53..42a1ac01d 100644
--- a/src/freeipmi_plugin.c
+++ b/src/freeipmi_plugin.c
@@ -1433,7 +1433,7 @@ int main (int argc, char **argv) {
continue;
}
}
- else if(strcmp("version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
printf("freeipmi.plugin %s\n", VERSION);
exit(0);
}
diff --git a/src/global_statistics.c b/src/global_statistics.c
index 8575061ac..886889086 100644
--- a/src/global_statistics.c
+++ b/src/global_statistics.c
@@ -199,8 +199,8 @@ void global_statistics_charts(void) {
stbytes = rrdset_create_localhost("netdata", "net", NULL, "netdata", NULL, "NetData Network Traffic"
, "kilobits/s", 130000, localhost->rrd_update_every, RRDSET_TYPE_AREA);
- rrddim_add(stbytes, "in", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(stbytes, "out", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(stbytes, "in", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(stbytes, "out", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
} else rrdset_next(stbytes);
rrddim_set(stbytes, "in", (collected_number) gs.bytes_received);
diff --git a/src/health.c b/src/health.c
index cc470f81f..136a1ecd7 100644
--- a/src/health.c
+++ b/src/health.c
@@ -204,14 +204,13 @@ static inline void health_process_notifications(RRDHOST *host, ALARM_ENTRY *ae)
}
static inline void health_alarm_log_process(RRDHOST *host) {
- static uint32_t stop_at_id = 0;
uint32_t first_waiting = (host->health_log.alarms)?host->health_log.alarms->unique_id:0;
time_t now = now_realtime_sec();
netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
ALARM_ENTRY *ae;
- for(ae = host->health_log.alarms; ae && ae->unique_id >= stop_at_id ; ae = ae->next) {
+ for(ae = host->health_log.alarms; ae && ae->unique_id >= host->health_last_processed_id ; ae = ae->next) {
if(unlikely(
!(ae->flags & HEALTH_ENTRY_FLAG_PROCESSED) &&
!(ae->flags & HEALTH_ENTRY_FLAG_UPDATED)
@@ -226,7 +225,7 @@ static inline void health_alarm_log_process(RRDHOST *host) {
}
// remember this for the next iteration
- stop_at_id = first_waiting;
+ host->health_last_processed_id = first_waiting;
netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
@@ -323,6 +322,23 @@ static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run)
return 1;
}
+static inline int check_if_resumed_from_suspension(void) {
+ static usec_t last_realtime = 0, last_monotonic = 0;
+ usec_t realtime = now_realtime_usec(), monotonic = now_monotonic_usec();
+ int ret = 0;
+
+ // detect if monotonic and realtime have twice the difference
+ // in which case we assume the system was just waken from hibernation
+
+ if(last_realtime && last_monotonic && realtime - last_realtime > 2 * (monotonic - last_monotonic))
+ ret = 1;
+
+ last_realtime = realtime;
+ last_monotonic = monotonic;
+
+ return ret;
+}
+
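check_if_resumed_from_suspension() compares the progress of the realtime and monotonic clocks between iterations: on Linux, CLOCK_MONOTONIC does not advance while the machine is suspended, so a realtime jump of more than twice the monotonic delta signals a resume. The same heuristic as a stand-alone sketch:

    import time

    _last = {'real': 0.0, 'mono': 0.0}

    def resumed_from_suspend():
        real, mono = time.time(), time.monotonic()
        resumed = bool(_last['real'] and _last['mono'] and
                       real - _last['real'] > 2 * (mono - _last['mono']))
        _last['real'], _last['mono'] = real, mono
        return resumed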
void *health_main(void *ptr) {
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
@@ -339,11 +355,8 @@ void *health_main(void *ptr) {
BUFFER *wb = buffer_create(100);
- time_t now = now_realtime_sec();
- time_t now_boottime = now_boottime_sec();
- time_t last_now = now;
- time_t last_now_boottime = now_boottime;
- time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60);
+ time_t now = now_realtime_sec();
+ time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60);
unsigned int loop = 0;
while(!netdata_exit) {
@@ -354,21 +367,14 @@ void *health_main(void *ptr) {
time_t next_run = now + min_run_every;
RRDCALC *rc;
- // detect if boottime and realtime have twice the difference
- // in which case we assume the system was just waken from hibernation
- if(unlikely(now - last_now > 2 * (now_boottime - last_now_boottime))) {
+        if(unlikely(check_if_resumed_from_suspension())) {
apply_hibernation_delay = 1;
- info("Postponing alarm checks for %ld seconds, due to boottime discrepancy (realtime dt: %ld, boottime dt: %ld)."
+ info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension."
, hibernation_delay
- , (long)(now - last_now)
- , (long)(now_boottime - last_now_boottime)
);
}
- last_now = now;
- last_now_boottime = now_boottime;
-
if(unlikely(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate) != 0))
error("Cannot set pthread cancel state to DISABLE.");
@@ -381,7 +387,7 @@ void *health_main(void *ptr) {
if(unlikely(apply_hibernation_delay)) {
- info("Postponing alarm checks for %ld seconds, on host '%s'."
+ info("Postponing health checks for %ld seconds, on host '%s'."
, hibernation_delay
, host->hostname
);
@@ -389,8 +395,13 @@ void *health_main(void *ptr) {
host->health_delay_up_to = now + hibernation_delay;
}
- if(unlikely(!host->health_enabled || now < host->health_delay_up_to))
- continue;
+ if(unlikely(host->health_delay_up_to)) {
+ if(unlikely(now < host->health_delay_up_to))
+ continue;
+
+ info("Resuming health checks on host '%s'.", host->hostname);
+ host->health_delay_up_to = 0;
+ }
rrdhost_rdlock(host);
@@ -726,8 +737,6 @@ void *health_main(void *ptr) {
else
debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration now", loop);
- now_boottime = now_boottime_sec();
-
} // forever
buffer_free(wb);
diff --git a/src/health_config.c b/src/health_config.c
index b4655dc78..2ead82ef5 100644
--- a/src/health_config.c
+++ b/src/health_config.c
@@ -6,6 +6,8 @@
#define HEALTH_ALARM_KEY "alarm"
#define HEALTH_TEMPLATE_KEY "template"
#define HEALTH_ON_KEY "on"
+#define HEALTH_HOST_KEY "hosts"
+#define HEALTH_OS_KEY "os"
#define HEALTH_FAMILIES_KEY "families"
#define HEALTH_LOOKUP_KEY "lookup"
#define HEALTH_CALC_KEY "calc"
@@ -88,7 +90,10 @@ static inline int rrdcalctemplate_add_template_from_config(RRDHOST *host, RRDCAL
RRDCALCTEMPLATE *t, *last = NULL;
for (t = host->templates; t ; last = t, t = t->next) {
- if(unlikely(t->hash_name == rt->hash_name && !strcmp(t->name, rt->name))) {
+ if(unlikely(t->hash_name == rt->hash_name
+ && !strcmp(t->name, rt->name)
+ && !strcmp(t->family_match?t->family_match:"*", rt->family_match?rt->family_match:"*")
+ )) {
error("Health configuration template '%s' already exists for host '%s'.", rt->name, host->hostname);
return 0;
}
@@ -400,7 +405,9 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
static uint32_t
hash_alarm = 0,
hash_template = 0,
+ hash_os = 0,
hash_on = 0,
+ hash_host = 0,
hash_families = 0,
hash_calc = 0,
hash_green = 0,
@@ -422,6 +429,8 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
hash_alarm = simple_uhash(HEALTH_ALARM_KEY);
hash_template = simple_uhash(HEALTH_TEMPLATE_KEY);
hash_on = simple_uhash(HEALTH_ON_KEY);
+ hash_os = simple_uhash(HEALTH_OS_KEY);
+ hash_host = simple_uhash(HEALTH_HOST_KEY);
hash_families = simple_uhash(HEALTH_FAMILIES_KEY);
hash_calc = simple_uhash(HEALTH_CALC_KEY);
hash_lookup = simple_uhash(HEALTH_LOOKUP_KEY);
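These precomputed hashes feed the key dispatch later in this file: each config line's key is hashed once, compared against the precomputed values first, and strcasecmp() runs only on a hash hit, keeping per-line parsing cheap. A toy version of the pattern, assuming a djb2-style case-insensitive hash in the spirit of netdata's simple_uhash() (not its exact implementation):

    #include <ctype.h>
    #include <stdint.h>
    #include <strings.h>

    static uint32_t uhash(const char *s) {
        uint32_t h = 5381;
        for(; *s; s++)
            h = ((h << 5) + h) + (uint32_t)tolower((unsigned char)*s);
        return h;
    }

    // hash equality is a cheap pre-filter; strcasecmp() confirms the match
    static int key_is(const char *key, uint32_t key_hash, const char *want, uint32_t want_hash) {
        return key_hash == want_hash && !strcasecmp(key, want);
    }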
@@ -448,6 +457,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
RRDCALC *rc = NULL;
RRDCALCTEMPLATE *rt = NULL;
+ int ignore_this = 0;
size_t line = 0, append = 0;
char *s;
while((s = fgets(&buffer[append], (int)(HEALTH_CONF_MAX_LINE - append), fp)) || append) {
@@ -494,11 +504,11 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
uint32_t hash = simple_uhash(key);
if(hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) {
- if(rc && !rrdcalc_add_alarm_from_config(host, rc))
+ if (rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc)))
rrdcalc_free(host, rc);
if(rt) {
- if (!rrdcalctemplate_add_template_from_config(host, rt))
+ if (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt))
rrdcalctemplate_free(host, rt);
rt = NULL;
}
@@ -516,15 +526,17 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
if(rrdvar_fix_name(rc->name))
error("Health configuration renamed alarm '%s' to '%s'", value, rc->name);
+
+ ignore_this = 0;
}
else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) {
if(rc) {
- if(!rrdcalc_add_alarm_from_config(host, rc))
+ if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc))
rrdcalc_free(host, rc);
rc = NULL;
}
- if(rt && !rrdcalctemplate_add_template_from_config(host, rt))
+ if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)))
rrdcalctemplate_free(host, rt);
rt = callocz(1, sizeof(RRDCALCTEMPLATE));
@@ -537,6 +549,40 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
if(rrdvar_fix_name(rt->name))
error("Health configuration renamed template '%s' to '%s'", value, rt->name);
+
+ ignore_this = 0;
+ }
+ else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) {
+ char *os_match = value;
+ SIMPLE_PATTERN *os_pattern = simple_pattern_create(os_match, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(os_pattern, host->os)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s/%s: host O/S does not match '%s'", host->hostname, rc->name, line, path, filename, os_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s/%s: host O/S does not match '%s'", host->hostname, rt->name, line, path, filename, os_match);
+
+ ignore_this = 1;
+ }
+
+ simple_pattern_free(os_pattern);
+ }
+ else if(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) {
+ char *host_match = value;
+ SIMPLE_PATTERN *host_pattern = simple_pattern_create(host_match, SIMPLE_PATTERN_EXACT);
+
+ if(!simple_pattern_matches(host_pattern, host->hostname)) {
+ if(rc)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring alarm '%s' defined at %zu@%s/%s: hostname does not match '%s'", host->hostname, rc->name, line, path, filename, host_match);
+
+ if(rt)
+ debug(D_HEALTH, "HEALTH on '%s' ignoring template '%s' defined at %zu@%s/%s: hostname does not match '%s'", host->hostname, rt->name, line, path, filename, host_match);
+
+ ignore_this = 1;
+ }
+
+ simple_pattern_free(host_pattern);
}
else if(rc) {
if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) {
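With the two new keys, an alarm or template can declare which operating systems and which hostnames it applies to; when the running host matches neither pattern, ignore_this is raised and the fully parsed entry is freed instead of registered. A hypothetical health.d entry using both keys (the alarm name, chart and threshold here are made up for illustration):

    # load this alarm only on Linux or FreeBSD hosts named web* or db*
     alarm: example_high_load
        on: system.load
        os: linux freebsd
     hosts: web* db*
      calc: $load1
      warn: $this > 10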
@@ -786,10 +832,10 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
}
}
- if(rc && !rrdcalc_add_alarm_from_config(host, rc))
+ if(rc && (ignore_this || !rrdcalc_add_alarm_from_config(host, rc)))
rrdcalc_free(host, rc);
- if(rt && !rrdcalctemplate_add_template_from_config(host, rt))
+ if(rt && (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)))
rrdcalctemplate_free(host, rt);
fclose(fp);
diff --git a/src/health_log.c b/src/health_log.c
index 95abcfe5f..9881d35d4 100644
--- a/src/health_log.c
+++ b/src/health_log.c
@@ -121,8 +121,6 @@ inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) {
}
inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename) {
- static uint32_t max_unique_id = 0, max_alarm_id = 0;
-
errno = 0;
char *s, *buf = mallocz(65536 + 1);
@@ -271,7 +269,7 @@ inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filena
ae->new_value = str2l(pointers[25]);
ae->old_value = str2l(pointers[26]);
- static char value_string[100 + 1];
+ char value_string[100 + 1];
freez(ae->old_value_string);
freez(ae->new_value_string);
ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1));
@@ -285,11 +283,11 @@ inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filena
}
else updated++;
- if(unlikely(ae->unique_id > max_unique_id))
- max_unique_id = ae->unique_id;
+ if(unlikely(ae->unique_id > host->health_max_unique_id))
+ host->health_max_unique_id = ae->unique_id;
- if(unlikely(ae->alarm_id >= max_alarm_id))
- max_alarm_id = ae->alarm_id;
+ if(unlikely(ae->alarm_id >= host->health_max_alarm_id))
+ host->health_max_alarm_id = ae->alarm_id;
}
else {
error("HEALTH [%s]: line %zu of file '%s' is invalid (unrecognized entry type '%s').", host->hostname, line, filename, pointers[0]);
@@ -301,11 +299,11 @@ inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filena
freez(buf);
- if(!max_unique_id) max_unique_id = (uint32_t)now_realtime_sec();
- if(!max_alarm_id) max_alarm_id = (uint32_t)now_realtime_sec();
+ if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec();
+ if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec();
- host->health_log.next_log_id = max_unique_id + 1;
- host->health_log.next_alarm_id = max_alarm_id + 1;
+ host->health_log.next_log_id = host->health_max_unique_id + 1;
+ host->health_log.next_alarm_id = host->health_max_alarm_id + 1;
debug(D_HEALTH, "HEALTH [%s]: loaded file '%s' with %zd new alarm entries, updated %zd alarms, errors %zd entries, duplicate %zd", host->hostname, filename, loaded, updated, errored, duplicate);
return loaded;
@@ -388,7 +386,7 @@ inline void health_alarm_log(
ae->old_value = old_value;
ae->new_value = new_value;
- static char value_string[100 + 1];
+ char value_string[100 + 1];
ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1));
ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1));
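Moving the two watermarks from function-scope statics into RRDHOST matters once more than one host is loaded (for example with streaming slaves): a static is shared by every caller, so a second host would inherit the first host's id maxima. A minimal illustration of the pitfall, with hypothetical names:

    struct host { unsigned int max_id; };

    static unsigned int next_id_buggy(void) {
        static unsigned int max_id = 0;   // shared by *every* host
        return ++max_id;
    }

    static unsigned int next_id_fixed(struct host *h) {
        return ++h->max_id;               // per-host state, as in this patch
    }

    // next_id_buggy() called once for each of two hosts yields 1, then 2 —
    // cross-talk; next_id_fixed(&a) and next_id_fixed(&b) both yield 1.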
diff --git a/src/inlined.h b/src/inlined.h
index f1812ba1c..9ab2dca73 100644
--- a/src/inlined.h
+++ b/src/inlined.h
@@ -244,26 +244,48 @@ static inline char *strncpyz(char *dst, const char *src, size_t n) {
return p;
}
-static inline int read_single_number_file(const char *filename, unsigned long long *result) {
- char buffer[30 + 1];
-
+static inline int read_file(const char *filename, char *buffer, size_t size) {
int fd = open(filename, O_RDONLY, 0666);
- if(unlikely(fd == -1)) {
- *result = 0;
+ if(unlikely(fd == -1))
return 1;
- }
- ssize_t r = read(fd, buffer, 30);
+ ssize_t r = read(fd, buffer, size);
if(unlikely(r == -1)) {
- *result = 0;
close(fd);
return 2;
}
+ buffer[r] = '\0';
close(fd);
+ return 0;
+}
+
+static inline int read_single_number_file(const char *filename, unsigned long long *result) {
+ char buffer[30 + 1];
+
+ int ret = read_file(filename, buffer, 30);
+ if(unlikely(ret)) {
+ *result = 0;
+ return ret;
+ }
+
buffer[30] = '\0';
*result = str2ull(buffer);
return 0;
}
+static inline int read_single_signed_number_file(const char *filename, long long *result) {
+ char buffer[30 + 1];
+
+ int ret = read_file(filename, buffer, 30);
+ if(unlikely(ret)) {
+ *result = 0;
+ return ret;
+ }
+
+ buffer[30] = '\0';
+ *result = atoll(buffer);
+ return 0;
+}
+
#endif //NETDATA_INLINED_H
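The refactor splits the open/read/close boilerplate into read_file(), which NUL-terminates the buffer on success, so both the unsigned parser and the new signed variant can reuse it. A usage sketch, assuming the netdata headers are included and a Linux-style one-number-per-file layout:

    unsigned long long temp = 0;
    if(read_single_number_file("/sys/class/thermal/thermal_zone0/temp", &temp))
        ; // open or read failed; temp has been reset to 0

    long long adj = 0;   // may legitimately be negative, hence the signed variant
    read_single_signed_number_file("/proc/self/oom_score_adj", &adj);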
diff --git a/src/macos_fw.c b/src/macos_fw.c
index c47da52f1..fa103e110 100644
--- a/src/macos_fw.c
+++ b/src/macos_fw.c
@@ -420,8 +420,8 @@ int do_macos_iokit(int update_every, usec_t dt) {
st = rrdset_create_localhost("net", ifa->ifa_name, NULL, ifa->ifa_name, "net.net", "Bandwidth"
, "kilobits/s", 7000, update_every, RRDSET_TYPE_AREA);
- rrddim_add(st, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
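The divisor change fixes the unit math rather than the look of the charts: these dimensions are plotted in "kilobits/s", and rrddim_add()'s two numeric arguments scale each collected sample as value * multiplier / divisor. Assuming BITS_IN_A_KILOBIT is defined as 1000 elsewhere in this patch:

    // bytes/s * 8 / 1000 -> SI kilobits/s, matching the declared unit
    // bytes/s * 8 / 1024 -> kibibits/s, the old (subtly wrong) scaling
    //
    // e.g. 125000 bytes/s: old -> 976.5 "kilobits/s", new -> 1000 kilobits/s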
diff --git a/src/macos_sysctl.c b/src/macos_sysctl.c
index af229fb61..843aceae0 100644
--- a/src/macos_sysctl.c
+++ b/src/macos_sysctl.c
@@ -299,8 +299,8 @@ int do_macos_sysctl(int update_every, usec_t dt) {
st = rrdset_create_localhost("system", "ipv4", NULL, "network", NULL, "IPv4 Bandwidth", "kilobits/s"
, 500, update_every, RRDSET_TYPE_AREA);
- rrddim_add(st, "InOctets", "received", 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutOctets", "sent", -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
@@ -588,7 +588,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
if (likely(do_icmpmsg)) {
st = rrdset_find_localhost("ipv4.icmpmsg");
if (unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messsages"
+ st = rrdset_create_localhost("ipv4", "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messages"
, "packets/s", 2604, update_every, RRDSET_TYPE_LINE);
rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/main.c b/src/main.c
index bf5d787ad..89ca828a1 100644
--- a/src/main.c
+++ b/src/main.c
@@ -172,7 +172,7 @@ void kill_childs()
// it is detached
// pthread_join(w->thread, NULL);
- w->obsolete = 1;
+ WEB_CLIENT_IS_OBSOLETE(w);
}
int i;
@@ -635,6 +635,7 @@ int main(int argc, char **argv) {
char* debug_flags_string = "debug_flags=";
if(strcmp(optarg, "unittest") == 0) {
+ if(unit_test_buffer()) exit(1);
if(unit_test_str2ld()) exit(1);
//default_rrd_update_every = 1;
//default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
@@ -854,47 +855,10 @@ int main(int argc, char **argv) {
// block signals while initializing threads.
// this causes the threads to block signals.
- sigset_t sigset;
- sigfillset(&sigset);
- if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1)
- error("Could not block signals for threads");
+ signals_block();
- // Catch signals which we want to use
- struct sigaction sa;
- sa.sa_flags = 0;
-
- // ingore all signals while we run in a signal handler
- sigfillset(&sa.sa_mask);
-
- // INFO: If we add signals here we have to unblock them
- // at popen.c when running a external plugin.
-
- // Ignore SIGPIPE completely.
- sa.sa_handler = SIG_IGN;
- if(sigaction(SIGPIPE, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGPIPE");
-
- sa.sa_handler = sig_handler_exit;
- if(sigaction(SIGINT, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGINT");
-
- sa.sa_handler = sig_handler_exit;
- if(sigaction(SIGTERM, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGTERM");
-
- sa.sa_handler = sig_handler_logrotate;
- if(sigaction(SIGHUP, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGHUP");
-
- // save database on SIGUSR1
- sa.sa_handler = sig_handler_save;
- if(sigaction(SIGUSR1, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGUSR1");
-
- // reload health configuration on SIGUSR2
- sa.sa_handler = sig_handler_reload_health;
- if(sigaction(SIGUSR2, &sa, NULL) == -1)
- error("Failed to change signal handler for SIGUSR2");
+ // setup the signals we want to use
+ signals_init();
// --------------------------------------------------------------------
@@ -939,11 +903,6 @@ int main(int argc, char **argv) {
user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:"");
}
- // IMPORTANT: these have to run once, while single threaded
- web_files_uid(); // IMPORTANT: web_files_uid() before web_files_gid()
- web_files_gid();
-
-
// --------------------------------------------------------------------
// create the listening sockets
@@ -974,6 +933,11 @@ int main(int argc, char **argv) {
info("netdata started on pid %d.", getpid());
+ // IMPORTANT: these have to run once, while single threaded
+ // but after we have switched user
+ web_files_uid();
+ web_files_gid();
+
// ------------------------------------------------------------------------
// set default pthread stack size - after we have forked
@@ -1025,21 +989,12 @@ int main(int argc, char **argv) {
// ------------------------------------------------------------------------
- // block signals while initializing threads.
- sigset_t sigset;
- sigfillset(&sigset);
+ // unblock signals
- if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) {
- error("Could not unblock signals for threads");
- }
+ signals_unblock();
- // Handle flags set in the signal handler.
- while(1) {
- pause();
- if(netdata_exit) {
- debug(D_EXIT, "Exit main loop of netdata.");
- netdata_cleanup_and_exit(0);
- exit(0);
- }
- }
+ // ------------------------------------------------------------------------
+ // Handle signals
+
+ signals_handle();
}
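All of the inline sigaction/pthread_sigmask code is consolidated behind four helpers; the core pattern is "block everything before spawning threads, then service signals synchronously on the main thread". A minimal POSIX sketch of that pattern (netdata's signals_*() helpers are richer, also covering log rotation, database save and health reload):

    #include <signal.h>
    #include <pthread.h>

    static void block_all_signals(void) {
        sigset_t set;
        sigfillset(&set);
        pthread_sigmask(SIG_BLOCK, &set, NULL);   // inherited by threads created later
    }

    static void handle_signals_forever(void) {
        sigset_t set;
        sigemptyset(&set);
        sigaddset(&set, SIGTERM);
        sigaddset(&set, SIGINT);

        for(;;) {
            int sig = 0;
            if(sigwait(&set, &sig) == 0 && (sig == SIGTERM || sig == SIGINT))
                break;   // cleanup and exit
        }
    }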
diff --git a/src/plugin_freebsd.c b/src/plugin_freebsd.c
index 020fdb41c..c09b28a5f 100644
--- a/src/plugin_freebsd.c
+++ b/src/plugin_freebsd.c
@@ -36,6 +36,7 @@ static struct freebsd_module {
// CPU metrics
{ .name = "kern.cp_times", .dim = "cp_times", .enabled = 1, .func = do_kern_cp_times },
+ { .name = "dev.cpu.temperature", .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature },
// disk metrics
{ .name = "kern.devstat", .dim = "kern_devstat", .enabled = 1, .func = do_kern_devstat },
diff --git a/src/plugin_freebsd.h b/src/plugin_freebsd.h
index 541bf852f..78fe33d7e 100644
--- a/src/plugin_freebsd.h
+++ b/src/plugin_freebsd.h
@@ -17,6 +17,7 @@ extern int do_vm_loadavg(int update_every, usec_t dt);
extern int do_vm_vmtotal(int update_every, usec_t dt);
extern int do_kern_cp_time(int update_every, usec_t dt);
extern int do_kern_cp_times(int update_every, usec_t dt);
+extern int do_dev_cpu_temperature(int update_every, usec_t dt);
extern int do_hw_intcnt(int update_every, usec_t dt);
extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
diff --git a/src/plugin_proc.h b/src/plugin_proc.h
index 688b23de9..72cfc6aa9 100644
--- a/src/plugin_proc.h
+++ b/src/plugin_proc.h
@@ -32,4 +32,8 @@ extern int get_numa_node_count(void);
// metrics that need to be shared among data collectors
extern unsigned long long tcpext_TCPSynRetrans;
+// netdev renames
+extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name);
+extern void netdev_rename_device_del(const char *host_device);
+
#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/src/plugin_proc_diskspace.c b/src/plugin_proc_diskspace.c
index 750086a2c..52c1f5ae6 100644
--- a/src/plugin_proc_diskspace.c
+++ b/src/plugin_proc_diskspace.c
@@ -6,6 +6,7 @@
static struct mountinfo *disk_mountinfo_root = NULL;
static int check_for_new_mountpoints_every = 15;
+static int cleanup_mount_points = 1;
static inline void mountinfo_reload(int force) {
static time_t last_loaded = 0;
@@ -58,7 +59,7 @@ int mount_point_cleanup(void *entry, void *data) {
return 0;
}
- if(likely(mp->collected)) {
+ if(likely(cleanup_mount_points && mp->collected)) {
mp->collected = 0;
mp->updated = 0;
mp->shown_error = 0;
@@ -328,6 +329,8 @@ void *proc_diskspace_main(void *ptr) {
int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1);
+ cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks", cleanup_mount_points);
+
int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
if(update_every < localhost->rrd_update_every)
update_every = localhost->rrd_update_every;
@@ -370,7 +373,8 @@ void *proc_diskspace_main(void *ptr) {
if(unlikely(netdata_exit)) break;
- dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL);
+ if(dict_mountpoints)
+ dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL);
if(vdo_cpu_netdata) {
static RRDSET *stcpu_thread = NULL, *st_duration = NULL;
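The new boolean makes chart cleanup for unmounted filesystems optional, so dashboards can keep the history of transient mounts. A hypothetical netdata.conf snippet (the section name is assumed to be the value of CONFIG_SECTION_DISKSPACE):

    [plugin:proc:diskspace]
        remove charts of unmounted disks = no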
diff --git a/src/plugin_tc.c b/src/plugin_tc.c
index 6bf5782a9..c928e61ba 100644
--- a/src/plugin_tc.c
+++ b/src/plugin_tc.c
@@ -392,7 +392,7 @@ static inline void tc_device_commit(struct tc_device *d) {
if(unlikely(!c->render)) continue;
if(unlikely(!c->rd_bytes))
- c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
else if(unlikely(c->name_updated))
rrddim_set_name(d->st_bytes, c->rd_bytes, c->name);
diff --git a/src/plugins_d.c b/src/plugins_d.c
index 9eb102770..42433b552 100644
--- a/src/plugins_d.c
+++ b/src/plugins_d.c
@@ -217,47 +217,63 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
st = NULL;
}
else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) {
- int noname = 0;
st = NULL;
- if((words[1]) != NULL && (words[2]) != NULL && strcmp(words[1], words[2]) == 0)
- noname = 1;
+ char *type = words[1];
+ char *name = words[2];
+ char *title = words[3];
+ char *units = words[4];
+ char *family = words[5];
+ char *context = words[6];
+ char *chart = words[7];
+ char *priority_s = words[8];
+ char *update_every_s = words[9];
+ char *options = words[10];
- char *type = words[1];
+ // parse the id from type
char *id = NULL;
- if(likely(type)) {
- id = strchr(type, '.');
- if(likely(id)) { *id = '\0'; id++; }
+ if(likely(type && (id = strchr(type, '.')))) {
+ *id = '\0';
+ id++;
}
- char *name = words[2];
- char *title = words[3];
- char *units = words[4];
- char *family = words[5];
- char *context = words[6];
- char *chart = words[7];
- char *priority_s = words[8];
- char *update_every_s = words[9];
- char *options = words[10];
+ // make sure we have the required variables
if(unlikely(!type || !*type || !id || !*id)) {
error("PLUGINSD: '%s' is requesting a CHART, without a type.id, on host '%s'. Disabling it.", cd->fullfilename, host->hostname);
enabled = 0;
break;
}
+ // parse the name, and make sure it does not include 'type.'
+ if(unlikely(name && *name)) {
+ // when data come from slaves,
+ // name arrives as 'type.name',
+ // so we have to strip the 'type.' prefix from it
+ size_t len = strlen(type);
+ if(strncmp(type, name, len) == 0 && name[len] == '.')
+ name = &name[len + 1];
+
+ // if the name is the same as the id,
+ // or is just 'NULL', clear it.
+ if(unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
+ name = NULL;
+ }
+
int priority = 1000;
- if(likely(priority_s)) priority = str2i(priority_s);
+ if(likely(priority_s && *priority_s)) priority = str2i(priority_s);
int update_every = cd->update_every;
- if(likely(update_every_s)) update_every = str2i(update_every_s);
+ if(likely(update_every_s && *update_every_s)) update_every = str2i(update_every_s);
if(unlikely(!update_every)) update_every = cd->update_every;
RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
if(unlikely(chart)) chart_type = rrdset_type_id(chart);
- if(unlikely(noname || !name || !*name || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0)) name = NULL;
- if(unlikely(!family || !*family)) family = NULL;
- if(unlikely(!context || !*context)) context = NULL;
+ if(unlikely(name && !*name)) name = NULL;
+ if(unlikely(family && !*family)) family = NULL;
+ if(unlikely(context && !*context)) context = NULL;
+ if(unlikely(!title)) title = "";
+ if(unlikely(!units)) units = "unknown";
st = rrdset_find_bytype(host, type, id);
if(unlikely(!st)) {
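When charts arrive from a slave, the CHART line carries the name as 'type.name', so the parser now strips the duplicated 'type.' prefix before deciding whether the name adds anything over the id. The prefix strip in isolation, as a self-contained sketch:

    #include <string.h>

    // returns name without a leading "<type>." prefix, if one is present
    static const char *strip_type_prefix(const char *type, const char *name) {
        size_t len = strlen(type);
        if(strncmp(type, name, len) == 0 && name[len] == '.')
            return name + len + 1;
        return name;
    }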
diff --git a/src/popen.c b/src/popen.c
index 8448b7311..27be61774 100644
--- a/src/popen.c
+++ b/src/popen.c
@@ -105,38 +105,8 @@ FILE *mypopen(const char *command, pid_t *pidptr)
#endif
// reset all signals
- {
- sigset_t sigset;
- sigfillset(&sigset);
-
- if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: could not unblock signals for threads.", command, getpid());
-
- // We only need to reset ignored signals.
- // Signals with signal handlers are reset by default.
- struct sigaction sa;
- sigemptyset(&sa.sa_mask);
- sa.sa_handler = SIG_DFL;
- sa.sa_flags = 0;
-
- if(sigaction(SIGINT, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGINT.", command, getpid());
-
- if(sigaction(SIGTERM, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGTERM.", command, getpid());
-
- if(sigaction(SIGPIPE, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGPIPE.", command, getpid());
-
- if(sigaction(SIGHUP, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGHUP.", command, getpid());
-
- if(sigaction(SIGUSR1, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGUSR1.", command, getpid());
-
- if(sigaction(SIGUSR2, &sa, NULL) == -1)
- error("pre-execution of command '%s' on pid %d: failed to set default signal handler for SIGUSR2.", command, getpid());
- }
+ signals_unblock();
+ signals_reset();
debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid());
execl("/bin/sh", "sh", "-c", command, NULL);
diff --git a/src/proc_diskstats.c b/src/proc_diskstats.c
index d3fed5a6d..4a32ec949 100644
--- a/src/proc_diskstats.c
+++ b/src/proc_diskstats.c
@@ -273,7 +273,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_do_qops = CONFIG_BOOLEAN_AUTO,
global_do_util = CONFIG_BOOLEAN_AUTO,
global_do_backlog = CONFIG_BOOLEAN_AUTO,
- globals_initialized = 0;
+ globals_initialized = 0,
+ global_cleanup_removed_disks = 1;
if(unlikely(!globals_initialized)) {
globals_initialized = 1;
@@ -291,6 +292,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "utilization percentage for all disks", global_do_util);
global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "backlog for all disks", global_do_backlog);
+ global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_DISKSTATS, "remove charts of removed disks", global_cleanup_removed_disks);
+
char buffer[FILENAME_MAX + 1];
snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
@@ -806,7 +809,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
struct disk *d = disk_root, *last = NULL;
while(d) {
- if(unlikely(!d->updated)) {
+ if(unlikely(global_cleanup_removed_disks && !d->updated)) {
struct disk *t = d;
rrdset_obsolete_and_pointer_null(d->st_avgsz);
diff --git a/src/proc_interrupts.c b/src/proc_interrupts.c
index 082e1f57b..b9f3941d2 100644
--- a/src/proc_interrupts.c
+++ b/src/proc_interrupts.c
@@ -18,10 +18,10 @@ struct interrupt {
// since each interrupt is variable in size
// we use this to calculate its record size
-#define recordsize(cpus) (sizeof(struct interrupt) + (cpus * sizeof(struct cpu_interrupt)))
+#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
// given a base, get a pointer to each record
-#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[line * recordsize(cpus)])
+#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
static struct interrupt *irrs = NULL;
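The extra parentheses are not cosmetic: without them, passing an expression as the macro argument silently changes the arithmetic through operator precedence. For example:

    // recordsize(ncpu + 1) with the old body expands to
    //   sizeof(struct interrupt) + (ncpu + 1 * sizeof(struct cpu_interrupt))
    // which adds room for ONE cpu_interrupt, not ncpu + 1 of them;
    // with (cpus) it becomes ((ncpu + 1) * sizeof(struct cpu_interrupt))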
@@ -142,68 +142,106 @@ int do_proc_interrupts(int update_every, usec_t dt) {
irr->used = 1;
}
- RRDSET *st;
-
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("system", "interrupts");
- if(unlikely(!st)) st = rrdset_create_localhost("system", "interrupts", NULL, "interrupts", NULL, "System interrupts"
- , "interrupts/s", 1000, update_every, RRDSET_TYPE_STACKED);
- else rrdset_next(st);
+ static RRDSET *st_system_interrupts = NULL;
+ if(unlikely(!st_system_interrupts))
+ st_system_interrupts = rrdset_create_localhost(
+ "system"
+ , "interrupts"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "System interrupts"
+ , "interrupts/s"
+ , 1000
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_interrupts);
for(l = 0; l < lines ;l++) {
struct interrupt *irr = irrindex(irrs, l, cpus);
if(unlikely(!irr->used)) continue;
+
// some interrupt may have changed without changing the total number of lines
// if the same number of interrupts have been added and removed between two
// calls of this function.
if(unlikely(!irr->rd || strncmp(irr->rd->name, irr->name, MAX_INTERRUPT_NAME) != 0)) {
- irr->rd = rrddim_find(st, irr->id);
+ irr->rd = rrddim_find(st_system_interrupts, irr->id);
+
if(unlikely(!irr->rd))
- irr->rd = rrddim_add(st, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
else
- rrddim_set_name(st, irr->rd, irr->name);
+ rrddim_set_name(st_system_interrupts, irr->rd, irr->name);
// also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
if(likely(do_per_core)) {
int c;
- for (c = 0; c < cpus ;c++)
- irr->cpu[c].rd = NULL;
+ for (c = 0; c < cpus ;c++) irr->cpu[c].rd = NULL;
}
}
- rrddim_set_by_pointer(st, irr->rd, irr->total);
+
+ rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total);
}
- rrdset_done(st);
+
+ rrdset_done(st_system_interrupts);
+
+ // --------------------------------------------------------------------
if(likely(do_per_core)) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
int c;
for(c = 0; c < cpus ;c++) {
- char id[50+1];
- snprintfz(id, 50, "cpu%d_interrupts", c);
+ if(unlikely(!core_st[c])) {
+ char id[50+1];
+ snprintfz(id, 50, "cpu%d_interrupts", c);
- st = rrdset_find_bytype_localhost("cpu", id);
- if(unlikely(!st)) {
char title[100+1];
snprintfz(title, 100, "CPU%d Interrupts", c);
- st = rrdset_create_localhost("cpu", id, NULL, "interrupts", "cpu.interrupts", title, "interrupts/s",
- 1100 + c, update_every, RRDSET_TYPE_STACKED);
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "interrupts"
+ , "cpu.interrupts"
+ , title
+ , "interrupts/s"
+ , 1100 + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
}
- else rrdset_next(st);
+ else rrdset_next(core_st[c]);
for(l = 0; l < lines ;l++) {
struct interrupt *irr = irrindex(irrs, l, cpus);
+
if(unlikely(!irr->used)) continue;
+
if(unlikely(!irr->cpu[c].rd)) {
- irr->cpu[c].rd = rrddim_find(st, irr->id);
+ irr->cpu[c].rd = rrddim_find(core_st[c], irr->id);
+
if(unlikely(!irr->cpu[c].rd))
- irr->cpu[c].rd = rrddim_add(st, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
else
- rrddim_set_name(st, irr->cpu[c].rd, irr->name);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
}
- rrddim_set_by_pointer(st, irr->cpu[c].rd, irr->cpu[c].value);
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
}
- rrdset_done(st);
+
+ rrdset_done(core_st[c]);
}
}
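The per-core charts are now cached in a static array that grows when the CPU count rises (e.g. hotplug), with the newly exposed slots zeroed so the creation path runs for them. The grow-and-zero pattern in plain libc terms (netdata's reallocz() additionally aborts on OOM, so no error path is shown):

    #include <stdlib.h>
    #include <string.h>

    static void **grow_ptr_array(void **arr, int *old_n, int new_n) {
        if(*old_n < new_n) {
            arr = realloc(arr, sizeof(void *) * new_n);
            memset(&arr[*old_n], 0, sizeof(void *) * (new_n - *old_n));
            *old_n = new_n;
        }
        return arr;
    }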
diff --git a/src/proc_loadavg.c b/src/proc_loadavg.c
index a48801b37..54fc545a5 100644
--- a/src/proc_loadavg.c
+++ b/src/proc_loadavg.c
@@ -7,7 +7,6 @@ int do_proc_loadavg(int update_every, usec_t dt) {
static procfile *ff = NULL;
static int do_loadavg = -1, do_all_processes = -1;
static usec_t next_loadavg_dt = 0;
- static RRDSET *load_chart = NULL, *processes_chart = NULL;
if(unlikely(!ff)) {
char filename[FILENAME_MAX + 1];
@@ -49,24 +48,33 @@ int do_proc_loadavg(int update_every, usec_t dt) {
if(next_loadavg_dt <= dt) {
if(likely(do_loadavg)) {
+ static RRDSET *load_chart = NULL;
+ static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL;
+
if(unlikely(!load_chart)) {
- load_chart = rrdset_find_byname_localhost("system.load");
- if(unlikely(!load_chart)) {
- load_chart = rrdset_create_localhost("system", "load", NULL, "load", NULL, "System Load Average"
- , "load", 100, (update_every < MIN_LOADAVG_UPDATE_EVERY)
- ? MIN_LOADAVG_UPDATE_EVERY : update_every
- , RRDSET_TYPE_LINE);
- rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
+ load_chart = rrdset_create_localhost(
+ "system"
+ , "load"
+ , NULL
+ , "load"
+ , NULL
+ , "System Load Average"
+ , "load"
+ , 100
+ , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
}
else
rrdset_next(load_chart);
- rrddim_set(load_chart, "load1", (collected_number) (load1 * 1000));
- rrddim_set(load_chart, "load5", (collected_number) (load5 * 1000));
- rrddim_set(load_chart, "load15", (collected_number) (load15 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000));
rrdset_done(load_chart);
next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
@@ -78,18 +86,28 @@ int do_proc_loadavg(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if(likely(do_all_processes)) {
+ static RRDSET *processes_chart = NULL;
+ static RRDDIM *rd_active = NULL;
+
if(unlikely(!processes_chart)) {
- processes_chart = rrdset_find_byname_localhost("system.active_processes");
- if(unlikely(!processes_chart)) {
- processes_chart = rrdset_create_localhost("system", "active_processes", NULL, "processes", NULL
- , "System Active Processes", "processes", 750, update_every
- , RRDSET_TYPE_LINE);
- rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
+ processes_chart = rrdset_create_localhost(
+ "system"
+ , "active_processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Active Processes"
+ , "processes"
+ , 750
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
else rrdset_next(processes_chart);
- rrddim_set(processes_chart, "active", active_processes);
+ rrddim_set_by_pointer(processes_chart, rd_active, active_processes);
rrdset_done(processes_chart);
}
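The same refactor pattern recurs throughout this patch: chart and dimension handles are captured once at creation time, and the hot path stores values through the cached RRDDIM pointers instead of looking dimensions up by name on every iteration. Conceptually:

    // rrddim_set(st, "load1", v);             // hashes the name and walks the
    //                                         // dimension list on every update
    // rrddim_set_by_pointer(st, rd_load1, v); // direct store, no lookup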
diff --git a/src/proc_meminfo.c b/src/proc_meminfo.c
index 6b0219cc9..152a6366e 100644
--- a/src/proc_meminfo.c
+++ b/src/proc_meminfo.c
@@ -132,54 +132,79 @@ int do_proc_meminfo(int update_every, usec_t dt) {
procfile_lineword(ff, l, 1)))) break;
}
- RRDSET *st;
-
// --------------------------------------------------------------------
// http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux
unsigned long long MemUsed = MemTotal - MemFree - Cached - Buffers;
if(do_ram) {
- st = rrdset_find_localhost("system.ram");
- if(!st) {
- st = rrdset_create_localhost("system", "ram", NULL, "ram", NULL, "System RAM", "MB", 200, update_every
- , RRDSET_TYPE_STACKED);
-
- rrddim_add(st, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_system_ram = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL;
+
+ if(unlikely(!st_system_ram)) {
+ st_system_ram = rrdset_create_localhost(
+ "system"
+ , "ram"
+ , NULL
+ , "ram"
+ , NULL
+ , "System RAM"
+ , "MB"
+ , 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_system_ram);
+
+ rrddim_set_by_pointer(st_system_ram, rd_free, MemFree);
+ rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed);
+ rrddim_set_by_pointer(st_system_ram, rd_cached, Cached);
+ rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers);
- rrddim_set(st, "free", MemFree);
- rrddim_set(st, "used", MemUsed);
- rrddim_set(st, "cached", Cached);
- rrddim_set(st, "buffers", Buffers);
- rrdset_done(st);
+ rrdset_done(st_system_ram);
}
// --------------------------------------------------------------------
unsigned long long SwapUsed = SwapTotal - SwapFree;
- if(SwapTotal || SwapUsed || SwapFree || do_swap == CONFIG_BOOLEAN_YES) {
+ if(do_swap == CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) {
do_swap = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("system.swap");
- if(!st) {
- st = rrdset_create_localhost("system", "swap", NULL, "swap", NULL, "System Swap", "MB", 201, update_every
- , RRDSET_TYPE_STACKED);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_system_swap = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL;
+
+ if(unlikely(!st_system_swap)) {
+ st_system_swap = rrdset_create_localhost(
+ "system"
+ , "swap"
+ , NULL
+ , "swap"
+ , NULL
+ , "System Swap"
+ , "MB"
+ , 201
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL);
+
+ rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_system_swap);
- rrddim_set(st, "used", SwapUsed);
- rrddim_set(st, "free", SwapFree);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed);
+ rrddim_set_by_pointer(st_system_swap, rd_free, SwapFree);
+
+ rrdset_done(st_system_swap);
}
// --------------------------------------------------------------------
@@ -187,102 +212,171 @@ int do_proc_meminfo(int update_every, usec_t dt) {
if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) {
do_hwcorrupt = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("mem.hwcorrupt");
- if(!st) {
- st = rrdset_create_localhost("mem", "hwcorrupt", NULL, "ecc", NULL, "Hardware Corrupted ECC", "MB", 9000
- , update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_mem_hwcorrupt = NULL;
+ static RRDDIM *rd_corrupted = NULL;
+
+ if(unlikely(!st_mem_hwcorrupt)) {
+ st_mem_hwcorrupt = rrdset_create_localhost(
+ "mem"
+ , "hwcorrupt"
+ , NULL
+ , "ecc"
+ , NULL
+ , "Corrupted Memory, detected by ECC"
+ , "MB"
+ , 9000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL);
+
+ rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_mem_hwcorrupt);
+
+ rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted);
- rrddim_set(st, "HardwareCorrupted", HardwareCorrupted);
- rrdset_done(st);
+ rrdset_done(st_mem_hwcorrupt);
}
// --------------------------------------------------------------------
if(do_committed) {
- st = rrdset_find_localhost("mem.committed");
- if(!st) {
- st = rrdset_create_localhost("mem", "committed", NULL, "system", NULL, "Committed (Allocated) Memory", "MB"
- , 5000, update_every, RRDSET_TYPE_AREA);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_mem_committed = NULL;
+ static RRDDIM *rd_committed = NULL;
+
+ if(unlikely(!st_mem_committed)) {
+ st_mem_committed = rrdset_create_localhost(
+ "mem"
+ , "committed"
+ , NULL
+ , "system"
+ , NULL
+ , "Committed (Allocated) Memory"
+ , "MB"
+ , 5000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL);
+
+ rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_mem_committed);
+
+ rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS);
- rrddim_set(st, "Committed_AS", Committed_AS);
- rrdset_done(st);
+ rrdset_done(st_mem_committed);
}
// --------------------------------------------------------------------
if(do_writeback) {
- st = rrdset_find_localhost("mem.writeback");
- if(!st) {
- st = rrdset_create_localhost("mem", "writeback", NULL, "kernel", NULL, "Writeback Memory", "MB", 4000
- , update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_mem_writeback = NULL;
+ static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL;
+
+ if(unlikely(!st_mem_writeback)) {
+ st_mem_writeback = rrdset_create_localhost(
+ "mem"
+ , "writeback"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Writeback Memory"
+ , "MB"
+ , 4000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL);
+
+ rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
-
- rrddim_set(st, "Dirty", Dirty);
- rrddim_set(st, "Writeback", Writeback);
- rrddim_set(st, "FuseWriteback", WritebackTmp);
- rrddim_set(st, "NfsWriteback", NFS_Unstable);
- rrddim_set(st, "Bounce", Bounce);
- rrdset_done(st);
+ else rrdset_next(st_mem_writeback);
+
+ rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty);
+ rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback);
+ rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp);
+ rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable);
+ rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce);
+
+ rrdset_done(st_mem_writeback);
}
// --------------------------------------------------------------------
if(do_kernel) {
- st = rrdset_find_localhost("mem.kernel");
- if(!st) {
- st = rrdset_create_localhost("mem", "kernel", NULL, "kernel", NULL, "Memory Used by Kernel", "MB", 6000
- , update_every, RRDSET_TYPE_STACKED);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_mem_kernel = NULL;
+ static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, *rd_vmallocused = NULL;
+
+ if(unlikely(!st_mem_kernel)) {
+ st_mem_kernel = rrdset_create_localhost(
+ "mem"
+ , "kernel"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Memory Used by Kernel"
+ , "MB"
+ , 6000
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL);
+
+ rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_mem_kernel);
+
+ rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab);
+ rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack);
+ rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables);
+ rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed);
- rrddim_set(st, "KernelStack", KernelStack);
- rrddim_set(st, "Slab", Slab);
- rrddim_set(st, "PageTables", PageTables);
- rrddim_set(st, "VmallocUsed", VmallocUsed);
- rrdset_done(st);
+ rrdset_done(st_mem_kernel);
}
// --------------------------------------------------------------------
if(do_slab) {
- st = rrdset_find_localhost("mem.slab");
- if(!st) {
- st = rrdset_create_localhost("mem", "slab", NULL, "slab", NULL, "Reclaimable Kernel Memory", "MB", 6500
- , update_every, RRDSET_TYPE_STACKED);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ static RRDSET *st_mem_slab = NULL;
+ static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL;
+
+ if(unlikely(!st_mem_slab)) {
+ st_mem_slab = rrdset_create_localhost(
+ "mem"
+ , "slab"
+ , NULL
+ , "slab"
+ , NULL
+ , "Reclaimable Kernel Memory"
+ , "MB"
+ , 6500
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL);
+
+ rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else rrdset_next(st_mem_slab);
+
+ rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable);
+ rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim);
- rrddim_set(st, "reclaimable", SReclaimable);
- rrddim_set(st, "unreclaimable", SUnreclaim);
- rrdset_done(st);
+ rrdset_done(st_mem_slab);
}
return 0;
diff --git a/src/proc_net_dev.c b/src/proc_net_dev.c
index ee7588990..6e12fc5dd 100644
--- a/src/proc_net_dev.c
+++ b/src/proc_net_dev.c
@@ -1,6 +1,9 @@
#include "common.h"
-struct netdev {
+// ----------------------------------------------------------------------------
+// netdev list
+
+static struct netdev {
char *name;
uint32_t hash;
size_t len;
@@ -18,6 +21,27 @@ struct netdev {
int do_compressed;
int do_events;
+ const char *chart_type_net_bytes;
+ const char *chart_type_net_packets;
+ const char *chart_type_net_errors;
+ const char *chart_type_net_fifo;
+ const char *chart_type_net_events;
+ const char *chart_type_net_drops;
+ const char *chart_type_net_compressed;
+
+ const char *chart_id_net_bytes;
+ const char *chart_id_net_packets;
+ const char *chart_id_net_errors;
+ const char *chart_id_net_fifo;
+ const char *chart_id_net_events;
+ const char *chart_id_net_drops;
+ const char *chart_id_net_compressed;
+
+ const char *chart_family;
+
+ int flipped;
+ unsigned long priority;
+
// data collected
kernel_uint_t rbytes;
kernel_uint_t rpackets;
@@ -66,13 +90,13 @@ struct netdev {
RRDDIM *rd_tcompressed;
struct netdev *next;
-};
-
-static struct netdev *netdev_root = NULL, *netdev_last_used = NULL;
+} *netdev_root = NULL, *netdev_last_used = NULL;
static size_t netdev_added = 0, netdev_found = 0;
-static void netdev_free(struct netdev *d) {
+// ----------------------------------------------------------------------------
+
+static void netdev_charts_release(struct netdev *d) {
if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth);
if(d->st_packets) rrdset_is_obsolete(d->st_packets);
if(d->st_errors) rrdset_is_obsolete(d->st_errors);
@@ -81,11 +105,229 @@ static void netdev_free(struct netdev *d) {
if(d->st_compressed) rrdset_is_obsolete(d->st_compressed);
if(d->st_events) rrdset_is_obsolete(d->st_events);
+ d->st_bandwidth = NULL;
+ d->st_compressed = NULL;
+ d->st_drops = NULL;
+ d->st_errors = NULL;
+ d->st_events = NULL;
+ d->st_fifo = NULL;
+ d->st_packets = NULL;
+
+ d->rd_rbytes = NULL;
+ d->rd_rpackets = NULL;
+ d->rd_rerrors = NULL;
+ d->rd_rdrops = NULL;
+ d->rd_rfifo = NULL;
+ d->rd_rframe = NULL;
+ d->rd_rcompressed = NULL;
+ d->rd_rmulticast = NULL;
+
+ d->rd_tbytes = NULL;
+ d->rd_tpackets = NULL;
+ d->rd_terrors = NULL;
+ d->rd_tdrops = NULL;
+ d->rd_tfifo = NULL;
+ d->rd_tcollisions = NULL;
+ d->rd_tcarrier = NULL;
+ d->rd_tcompressed = NULL;
+}
+
+static void netdev_free_strings(struct netdev *d) {
+ freez((void *)d->chart_type_net_bytes);
+ freez((void *)d->chart_type_net_compressed);
+ freez((void *)d->chart_type_net_drops);
+ freez((void *)d->chart_type_net_errors);
+ freez((void *)d->chart_type_net_events);
+ freez((void *)d->chart_type_net_fifo);
+ freez((void *)d->chart_type_net_packets);
+
+ freez((void *)d->chart_id_net_bytes);
+ freez((void *)d->chart_id_net_compressed);
+ freez((void *)d->chart_id_net_drops);
+ freez((void *)d->chart_id_net_errors);
+ freez((void *)d->chart_id_net_events);
+ freez((void *)d->chart_id_net_fifo);
+ freez((void *)d->chart_id_net_packets);
+
+ freez((void *)d->chart_family);
+}
+
+static void netdev_free(struct netdev *d) {
+ netdev_charts_release(d);
+ netdev_free_strings(d);
+
+ freez((void *)d->name);
+ freez((void *)d);
netdev_added--;
- freez(d->name);
- freez(d);
}
+
+// ----------------------------------------------------------------------------
+// netdev renames
+
+static struct netdev_rename {
+ const char *host_device;
+ uint32_t hash;
+
+ const char *container_device;
+ const char *container_name;
+
+ int processed;
+
+ struct netdev_rename *next;
+} *netdev_rename_root = NULL;
+
+static int netdev_pending_renames = 0;
+static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER;
+
+static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) {
+ struct netdev_rename *r;
+
+ for(r = netdev_rename_root; r ; r = r->next)
+ if(r->hash == hash && !strcmp(host_device, r->host_device))
+ return r;
+
+ return NULL;
+}
+
+// other threads can call this function to register a rename to a netdev
+void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ uint32_t hash = simple_hash(host_device);
+ struct netdev_rename *r = netdev_rename_find(host_device, hash);
+ if(!r) {
+ r = callocz(1, sizeof(struct netdev_rename));
+ r->host_device = strdupz(host_device);
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+ r->hash = hash;
+ r->next = netdev_rename_root;
+ r->processed = 0;
+ netdev_rename_root = r;
+ netdev_pending_renames++;
+ info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ else {
+ if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, container_name) != 0) {
+ freez((void *) r->container_device);
+ freez((void *) r->container_name);
+
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+ r->processed = 0;
+ netdev_pending_renames++;
+ info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+// other threads can call this function to delete a rename to a netdev
+void netdev_rename_device_del(const char *host_device) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev_rename *r, *last = NULL;
+
+ uint32_t hash = simple_hash(host_device);
+ for(r = netdev_rename_root; r ; last = r, r = r->next) {
+ if (r->hash == hash && !strcmp(host_device, r->host_device)) {
+ if (netdev_rename_root == r)
+ netdev_rename_root = r->next;
+ else if (last)
+ last->next = r->next;
+
+ if(!r->processed)
+ netdev_pending_renames--;
+
+ info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ freez((void *) r->host_device);
+ freez((void *) r->container_name);
+ freez((void *) r->container_device);
+ freez((void *) r);
+ break;
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) {
+ info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ netdev_charts_release(d);
+ netdev_free_strings(d);
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name);
+ d->chart_type_net_bytes = strdupz(buffer);
+ d->chart_type_net_compressed = strdupz(buffer);
+ d->chart_type_net_drops = strdupz(buffer);
+ d->chart_type_net_errors = strdupz(buffer);
+ d->chart_type_net_events = strdupz(buffer);
+ d->chart_type_net_fifo = strdupz(buffer);
+ d->chart_type_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device);
+ d->chart_id_net_bytes = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device);
+ d->chart_id_net_compressed = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device);
+ d->chart_id_net_drops = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device);
+ d->chart_id_net_errors = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device);
+ d->chart_id_net_events = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device);
+ d->chart_id_net_fifo = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device);
+ d->chart_id_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device);
+ d->chart_family = strdupz(buffer);
+
+ d->priority = 43000;
+ d->flipped = 1;
+}
+
+static inline void netdev_rename(struct netdev *d) {
+ struct netdev_rename *r = netdev_rename_find(d->name, d->hash);
+ if(unlikely(r && !r->processed)) {
+ netdev_rename_cgroup(d, r);
+ r->processed = 1;
+ netdev_pending_renames--;
+ }
+}
+
+static inline void netdev_rename_lock(struct netdev *d) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+ netdev_rename(d);
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_all_lock(void) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev *d;
+ for(d = netdev_root; d ; d = d->next)
+ netdev_rename(d);
+
+ netdev_pending_renames = 0;
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+// ----------------------------------------------------------------------------
+// netdev data collection
+
static void netdev_cleanup() {
if(likely(netdev_found == netdev_added)) return;
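The rename registry lets another collector (the cgroups plugin) ask the netdev collector to re-home an interface's charts under a container: renames are registered, updated and deleted under a mutex, and applied lazily on the next /proc/net/dev pass via netdev_pending_renames. A usage sketch from the caller's side, with hypothetical device and container names:

    // when a container's veth pair is discovered:
    netdev_rename_device_add("vethx1y2z3", "eth0", "mycontainer");
    // on the next pass, charts for vethx1y2z3 are re-created as
    // cgroup_mycontainer / net_eth0 (priority 43000, dimensions flipped)

    // when the container goes away:
    netdev_rename_device_del("vethx1y2z3");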
@@ -144,6 +386,28 @@ static struct netdev *get_netdev(const char *name) {
d->name = strdupz(name);
d->hash = simple_hash(d->name);
d->len = strlen(d->name);
+
+ d->chart_type_net_bytes = strdupz("net");
+ d->chart_type_net_compressed = strdupz("net_compressed");
+ d->chart_type_net_drops = strdupz("net_drops");
+ d->chart_type_net_errors = strdupz("net_errors");
+ d->chart_type_net_events = strdupz("net_events");
+ d->chart_type_net_fifo = strdupz("net_fifo");
+ d->chart_type_net_packets = strdupz("net_packets");
+
+ d->chart_id_net_bytes = strdupz(d->name);
+ d->chart_id_net_compressed = strdupz(d->name);
+ d->chart_id_net_drops = strdupz(d->name);
+ d->chart_id_net_errors = strdupz(d->name);
+ d->chart_id_net_events = strdupz(d->name);
+ d->chart_id_net_fifo = strdupz(d->name);
+ d->chart_id_net_packets = strdupz(d->name);
+
+ d->chart_family = strdupz(d->name);
+ d->priority = 7000;
+
+ netdev_rename_lock(d);
+
netdev_added++;
// link it to the end
@@ -176,9 +440,7 @@ int do_proc_net_dev(int update_every, usec_t dt) {
do_compressed = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO);
do_events = config_get_boolean_ondemand("plugin:proc:/proc/net/dev", "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO);
- disabled_list = simple_pattern_create(
- config_get("plugin:proc:/proc/net/dev", "disable by default interfaces matching", "lo fireqos* *-ifb")
- , SIMPLE_PATTERN_EXACT);
+ disabled_list = simple_pattern_create(config_get("plugin:proc:/proc/net/dev", "disable by default interfaces matching", "lo fireqos* *-ifb"), SIMPLE_PATTERN_EXACT);
}
if(unlikely(!ff)) {
@@ -191,6 +453,10 @@ int do_proc_net_dev(int update_every, usec_t dt) {
ff = procfile_readall(ff);
if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+ // rename all the devices, if we have pending renames
+ if(unlikely(netdev_pending_renames))
+ netdev_rename_all_lock();
+
netdev_found = 0;
size_t lines = procfile_lines(ff), l;
@@ -220,13 +486,13 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(d->enabled == CONFIG_BOOLEAN_NO)
continue;
- d->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
- d->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
- d->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
- d->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
- d->do_fifo = config_get_boolean_ondemand(var_name, "fifo", do_fifo);
+ d->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
+ d->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
+ d->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
+ d->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
+ d->do_fifo = config_get_boolean_ondemand(var_name, "fifo", do_fifo);
d->do_compressed = config_get_boolean_ondemand(var_name, "compressed", do_compressed);
- d->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
+ d->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
}
if(unlikely(!d->enabled))
@@ -278,20 +544,28 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_bandwidth)) {
d->st_bandwidth = rrdset_create_localhost(
- "net"
- , d->name
+ d->chart_type_net_bytes
+ , d->chart_id_net_bytes
, NULL
- , d->name
+ , d->chart_family
, "net.net"
, "Bandwidth"
, "kilobits/s"
- , 7000
+ , d->priority
, update_every
, RRDSET_TYPE_AREA
);
- d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rbytes;
+ d->rd_rbytes = d->rd_tbytes;
+ d->rd_tbytes = td;
+ }
}
else rrdset_next(d->st_bandwidth);
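[editor's note] On d->flipped above: swapping the two RRDDIM pointers once, at chart creation, means the collection code never needs to branch per sample. A sketch of why the one-time swap is enough, assuming (not shown in this hunk) that flipped is set for interfaces whose traffic is mirrored, such as the host side of a veth pair:

    /* sketch: after the pointer swap, the unchanged collection code
     *
     *     rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, rbytes);
     *     rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, tbytes);
     *
     * feeds host-side "received" bytes into the chart's "sent" dimension
     * and vice versa -- no per-sample check of d->flipped is needed. */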
@@ -309,23 +583,31 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_packets)) {
d->st_packets = rrdset_create_localhost(
- "net_packets"
- , d->name
+ d->chart_type_net_packets
+ , d->chart_id_net_packets
, NULL
- , d->name
+ , d->chart_family
, "net.packets"
, "Packets"
, "packets/s"
- , 7001
+ , d->priority + 1
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
- d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rpackets;
+ d->rd_rpackets = d->rd_tpackets;
+ d->rd_tpackets = td;
+ }
}
else rrdset_next(d->st_packets);
@@ -344,22 +626,30 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_errors)) {
d->st_errors = rrdset_create_localhost(
- "net_errors"
- , d->name
+ d->chart_type_net_errors
+ , d->chart_id_net_errors
, NULL
- , d->name
+ , d->chart_family
, "net.errors"
, "Interface Errors"
, "errors/s"
- , 7002
+ , d->priority + 2
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
- d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rerrors;
+ d->rd_rerrors = d->rd_terrors;
+ d->rd_terrors = td;
+ }
}
else rrdset_next(d->st_errors);
@@ -377,22 +667,30 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_drops)) {
d->st_drops = rrdset_create_localhost(
- "net_drops"
- , d->name
+ d->chart_type_net_drops
+ , d->chart_id_net_drops
, NULL
- , d->name
+ , d->chart_family
, "net.drops"
, "Interface Drops"
, "drops/s"
- , 7003
+ , d->priority + 3
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
- d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rdrops;
+ d->rd_rdrops = d->rd_tdrops;
+ d->rd_tdrops = td;
+ }
}
else rrdset_next(d->st_drops);
@@ -410,22 +708,30 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_fifo)) {
d->st_fifo = rrdset_create_localhost(
- "net_fifo"
- , d->name
+ d->chart_type_net_fifo
+ , d->chart_id_net_fifo
, NULL
- , d->name
+ , d->chart_family
, "net.fifo"
, "Interface FIFO Buffer Errors"
, "errors"
- , 7004
+ , d->priority + 4
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
- d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rfifo;
+ d->rd_rfifo = d->rd_tfifo;
+ d->rd_tfifo = td;
+ }
}
else rrdset_next(d->st_fifo);
@@ -443,22 +749,30 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_compressed)) {
d->st_compressed = rrdset_create_localhost(
- "net_compressed"
- , d->name
+ d->chart_type_net_compressed
+ , d->chart_id_net_compressed
, NULL
- , d->name
+ , d->chart_family
, "net.compressed"
, "Compressed Packets"
, "packets/s"
- , 7005
+ , d->priority + 5
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
- d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rcompressed;
+ d->rd_rcompressed = d->rd_tcompressed;
+ d->rd_tcompressed = td;
+ }
}
else rrdset_next(d->st_compressed);
@@ -476,23 +790,23 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(unlikely(!d->st_events)) {
d->st_events = rrdset_create_localhost(
- "net_events"
- , d->name
+ d->chart_type_net_events
+ , d->chart_id_net_events
, NULL
- , d->name
+ , d->chart_family
, "net.events"
, "Network Interface Events"
, "events/s"
- , 7006
+ , d->priority + 6
, update_every
, RRDSET_TYPE_LINE
);
rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
- d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(d->st_events);
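[editor's note] The hunk near the top of this file defers interface renames: other code only flags pending work, and the collection loop applies it under a lock just before parsing. A condensed sketch of that pattern, assuming (as the hunk suggests but does not show) that netdev_pending_renames counts queued renames guarded by the netdev_rename_*lock() helpers seen above:

    /* sketch: a producer (e.g. a cgroups collector) queues a rename and bumps
     * the counter; the /proc/net/dev loop applies everything in one pass,
     * keeping the common path lock-free: */
    if(unlikely(netdev_pending_renames))
        netdev_rename_all_lock();   /* takes the lock, renames, resets the counter */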
diff --git a/src/proc_net_ip_vs_stats.c b/src/proc_net_ip_vs_stats.c
index 16a3234df..aa806b460 100644
--- a/src/proc_net_ip_vs_stats.c
+++ b/src/proc_net_ip_vs_stats.c
@@ -80,8 +80,8 @@ int do_proc_net_ip_vs_stats(int update_every, usec_t dt) {
st = rrdset_create_localhost(RRD_TYPE_NET_IPVS, "net", NULL, RRD_TYPE_NET_IPVS, NULL, "IPVS Bandwidth"
, "kilobits/s", 3100, update_every, RRDSET_TYPE_AREA);
- rrddim_add(st, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
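[editor's note] The 1024 -> BITS_IN_A_KILOBIT substitution above recurs throughout this patch. The macro's definition is not part of any hunk shown here; assuming the SI value, the effect is:

    /* assumed definition (lives in a shared header, not in this diff): */
    #define BITS_IN_A_KILOBIT 1000

    /* a dimension added as (8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL)
     * now charts bytes_delta * 8 / 1000 kilobits/s, where the old divisor
     * of 1024 mixed a binary prefix into a decimal unit. */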
diff --git a/src/proc_net_netstat.c b/src/proc_net_netstat.c
index 322e51d13..e01b81d28 100644
--- a/src/proc_net_netstat.c
+++ b/src/proc_net_netstat.c
@@ -232,151 +232,255 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
parse_line_pair(ff, arl_ipext, h, l);
- RRDSET *st;
-
// --------------------------------------------------------------------
if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets || ipext_OutOctets))) {
do_bandwidth = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("system.ipv4");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "ipv4", NULL, "network", NULL, "IPv4 Bandwidth", "kilobits/s"
- , 500, update_every, RRDSET_TYPE_AREA);
-
- rrddim_add(st, "InOctets", "received", 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutOctets", "sent", -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_system_ipv4 = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_system_ipv4)) {
+ st_system_ipv4 = rrdset_create_localhost(
+ "system"
+ , "ipv4"
+ , NULL
+ , "network"
+ , NULL
+ , "IPv4 Bandwidth"
+ , "kilobits/s"
+ , 500
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_system_ipv4, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_system_ipv4, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_system_ipv4);
- rrddim_set(st, "InOctets", ipext_InOctets);
- rrddim_set(st, "OutOctets", ipext_OutOctets);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_system_ipv4, rd_in, ipext_InOctets);
+ rrddim_set_by_pointer(st_system_ipv4, rd_out, ipext_OutOctets);
+
+ rrdset_done(st_system_ipv4);
}
// --------------------------------------------------------------------
if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) {
do_inerrors = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.inerrors");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "inerrors", NULL, "errors", NULL, "IPv4 Input Errors"
- , "packets/s", 4000, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_ipv4_inerrors = NULL;
+ static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL;
+
+ if(unlikely(!st_ipv4_inerrors)) {
+ st_ipv4_inerrors = rrdset_create_localhost(
+ "ipv4"
+ , "inerrors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv4 Input Errors"
+ , "packets/s"
+ , 4000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ipv4_inerrors, RRDSET_FLAG_DETAIL);
+
+ rd_noroutes = rrddim_add(st_ipv4_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_truncated = rrddim_add(st_ipv4_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_checksum = rrddim_add(st_ipv4_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_ipv4_inerrors);
+
+ rrddim_set_by_pointer(st_ipv4_inerrors, rd_noroutes, ipext_InNoRoutes);
+ rrddim_set_by_pointer(st_ipv4_inerrors, rd_truncated, ipext_InTruncatedPkts);
+ rrddim_set_by_pointer(st_ipv4_inerrors, rd_checksum, ipext_InCsumErrors);
- rrddim_set(st, "InNoRoutes", ipext_InNoRoutes);
- rrddim_set(st, "InTruncatedPkts", ipext_InTruncatedPkts);
- rrddim_set(st, "InCsumErrors", ipext_InCsumErrors);
- rrdset_done(st);
+ rrdset_done(st_ipv4_inerrors);
}
// --------------------------------------------------------------------
if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) {
do_mcast = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.mcast");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "mcast", NULL, "multicast", NULL, "IPv4 Multicast Bandwidth"
- , "kilobits/s", 9000, update_every, RRDSET_TYPE_AREA);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InMcastOctets", "received", 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutMcastOctets", "sent", -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_ipv4_mcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ipv4_mcast)) {
+ st_ipv4_mcast = rrdset_create_localhost(
+ "ipv4"
+ , "mcast"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IPv4 Multicast Bandwidth"
+ , "kilobits/s"
+ , 9000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ipv4_mcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ipv4_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ipv4_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_ipv4_mcast);
- rrddim_set(st, "InMcastOctets", ipext_InMcastOctets);
- rrddim_set(st, "OutMcastOctets", ipext_OutMcastOctets);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_ipv4_mcast, rd_in, ipext_InMcastOctets);
+ rrddim_set_by_pointer(st_ipv4_mcast, rd_out, ipext_OutMcastOctets);
+
+ rrdset_done(st_ipv4_mcast);
}
// --------------------------------------------------------------------
if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) {
do_bcast = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.bcast");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "bcast", NULL, "broadcast", NULL, "IPv4 Broadcast Bandwidth"
- , "kilobits/s", 8000, update_every, RRDSET_TYPE_AREA);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InBcastOctets", "received", 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutBcastOctets", "sent", -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_ipv4_bcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ipv4_bcast)) {
+ st_ipv4_bcast = rrdset_create_localhost(
+ "ipv4"
+ , "bcast"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IPv4 Broadcast Bandwidth"
+ , "kilobits/s"
+ , 8000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ipv4_bcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ipv4_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ipv4_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_ipv4_bcast);
+
+ rrddim_set_by_pointer(st_ipv4_bcast, rd_in, ipext_InBcastOctets);
+ rrddim_set_by_pointer(st_ipv4_bcast, rd_out, ipext_OutBcastOctets);
- rrddim_set(st, "InBcastOctets", ipext_InBcastOctets);
- rrddim_set(st, "OutBcastOctets", ipext_OutBcastOctets);
- rrdset_done(st);
+ rrdset_done(st_ipv4_bcast);
}
// --------------------------------------------------------------------
if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) {
do_mcast_p = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.mcastpkts");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "mcastpkts", NULL, "multicast", NULL, "IPv4 Multicast Packets"
- , "packets/s", 8600, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_ipv4_mcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ipv4_mcastpkts)) {
+ st_ipv4_mcastpkts = rrdset_create_localhost(
+ "ipv4"
+ , "mcastpkts"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IPv4 Multicast Packets"
+ , "packets/s"
+ , 8600
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ipv4_mcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ipv4_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ipv4_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else rrdset_next(st_ipv4_mcastpkts);
+
+ rrddim_set_by_pointer(st_ipv4_mcastpkts, rd_in, ipext_InMcastPkts);
+ rrddim_set_by_pointer(st_ipv4_mcastpkts, rd_out, ipext_OutMcastPkts);
- rrddim_set(st, "InMcastPkts", ipext_InMcastPkts);
- rrddim_set(st, "OutMcastPkts", ipext_OutMcastPkts);
- rrdset_done(st);
+ rrdset_done(st_ipv4_mcastpkts);
}
// --------------------------------------------------------------------
if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) {
do_bcast_p = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.bcastpkts");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "bcastpkts", NULL, "broadcast", NULL, "IPv4 Broadcast Packets"
- , "packets/s", 8500, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_ipv4_bcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ipv4_bcastpkts)) {
+ st_ipv4_bcastpkts = rrdset_create_localhost(
+ "ipv4"
+ , "bcastpkts"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IPv4 Broadcast Packets"
+ , "packets/s"
+ , 8500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ipv4_bcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ipv4_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ipv4_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_ipv4_bcastpkts);
- rrddim_set(st, "InBcastPkts", ipext_InBcastPkts);
- rrddim_set(st, "OutBcastPkts", ipext_OutBcastPkts);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_ipv4_bcastpkts, rd_in, ipext_InBcastPkts);
+ rrddim_set_by_pointer(st_ipv4_bcastpkts, rd_out, ipext_OutBcastPkts);
+
+ rrdset_done(st_ipv4_bcastpkts);
}
// --------------------------------------------------------------------
if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) {
do_ecn = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.ecnpkts");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "ecnpkts", NULL, "ecn", NULL, "IPv4 ECN Statistics"
- , "packets/s", 8700, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_ecnpkts = NULL;
+ static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL;
+
+ if(unlikely(!st_ecnpkts)) {
+ st_ecnpkts = rrdset_create_localhost(
+ "ipv4"
+ , "ecnpkts"
+ , NULL
+ , "ecn"
+ , NULL
+ , "IPv4 ECN Statistics"
+ , "packets/s"
+ , 8700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL);
+
+ rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else rrdset_next(st_ecnpkts);
+
+ rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts);
- rrddim_set(st, "InCEPkts", ipext_InCEPkts);
- rrddim_set(st, "InNoECTPkts", ipext_InNoECTPkts);
- rrddim_set(st, "InECT0Pkts", ipext_InECT0Pkts);
- rrddim_set(st, "InECT1Pkts", ipext_InECT1Pkts);
- rrdset_done(st);
+ rrdset_done(st_ecnpkts);
}
}
else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) {
@@ -390,117 +494,192 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
parse_line_pair(ff, arl_tcpext, h, l);
- RRDSET *st;
-
// --------------------------------------------------------------------
if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) {
do_tcpext_memory = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpmemorypressures");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "tcpmemorypressures", NULL, "tcp", NULL, "TCP Memory Pressures"
- , "events/s", 3000, update_every, RRDSET_TYPE_LINE);
- rrddim_add(st, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_tcpmemorypressures = NULL;
+ static RRDDIM *rd_pressures = NULL;
+
+ if(unlikely(!st_tcpmemorypressures)) {
+ st_tcpmemorypressures = rrdset_create_localhost(
+ "ipv4"
+ , "tcpmemorypressures"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Memory Pressures"
+ , "events/s"
+ , 3000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_tcpmemorypressures);
+
+ rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures);
- rrddim_set(st, "TCPMemoryPressures", tcpext_TCPMemoryPressures);
- rrdset_done(st);
+ rrdset_done(st_tcpmemorypressures);
}
// --------------------------------------------------------------------
if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) {
do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpconnaborts");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "tcpconnaborts", NULL, "tcp", NULL, "TCP Connection Aborts"
- , "connections/s", 3010, update_every, RRDSET_TYPE_LINE);
-
- rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_tcpconnaborts = NULL;
+ static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_tcpconnaborts)) {
+ st_tcpconnaborts = rrdset_create_localhost(
+ "ipv4"
+ , "tcpconnaborts"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Connection Aborts"
+ , "connections/s"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
-
- rrddim_set(st, "TCPAbortOnData", tcpext_TCPAbortOnData);
- rrddim_set(st, "TCPAbortOnClose", tcpext_TCPAbortOnClose);
- rrddim_set(st, "TCPAbortOnMemory", tcpext_TCPAbortOnMemory);
- rrddim_set(st, "TCPAbortOnTimeout", tcpext_TCPAbortOnTimeout);
- rrddim_set(st, "TCPAbortOnLinger", tcpext_TCPAbortOnLinger);
- rrddim_set(st, "TCPAbortFailed", tcpext_TCPAbortFailed);
- rrdset_done(st);
+ else
+ rrdset_next(st_tcpconnaborts);
+
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed);
+
+ rrdset_done(st_tcpconnaborts);
}
+
// --------------------------------------------------------------------
if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) {
do_tcpext_reorder = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpreorders");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "tcpreorders", NULL, "tcp", NULL
- , "TCP Reordered Packets by Detection Method", "packets/s", 3020
- , update_every, RRDSET_TYPE_LINE);
-
- rrddim_add(st, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_tcpreorders = NULL;
+ static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL;
+
+ if(unlikely(!st_tcpreorders)) {
+ st_tcpreorders = rrdset_create_localhost(
+ "ipv4"
+ , "tcpreorders"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Reordered Packets by Detection Method"
+ , "packets/s"
+ , 3020
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_tcpreorders);
+
+ rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder);
- rrddim_set(st, "TCPTSReorder", tcpext_TCPTSReorder);
- rrddim_set(st, "TCPSACKReorder", tcpext_TCPSACKReorder);
- rrddim_set(st, "TCPFACKReorder", tcpext_TCPFACKReorder);
- rrddim_set(st, "TCPRenoReorder", tcpext_TCPRenoReorder);
- rrdset_done(st);
+ rrdset_done(st_tcpreorders);
}
// --------------------------------------------------------------------
if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) {
do_tcpext_ofo = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpofo");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "tcpofo", NULL, "tcp", NULL, "TCP Out-Of-Order Queue"
- , "packets/s", 3050, update_every, RRDSET_TYPE_LINE);
-
- rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_ipv4_tcpofo = NULL;
+ static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL;
+
+ if(unlikely(!st_ipv4_tcpofo)) {
+
+ st_ipv4_tcpofo = rrdset_create_localhost(
+ "ipv4"
+ , "tcpofo"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Out-Of-Order Queue"
+ , "packets/s"
+ , 3050
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inqueue = rrddim_add(st_ipv4_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dropped = rrddim_add(st_ipv4_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_merged = rrddim_add(st_ipv4_tcpofo, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pruned = rrddim_add(st_ipv4_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_ipv4_tcpofo);
- rrddim_set(st, "TCPOFOQueue", tcpext_TCPOFOQueue);
- rrddim_set(st, "TCPOFODrop", tcpext_TCPOFODrop);
- rrddim_set(st, "TCPOFOMerge", tcpext_TCPOFOMerge);
- rrddim_set(st, "OfoPruned", tcpext_OfoPruned);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_ipv4_tcpofo, rd_inqueue, tcpext_TCPOFOQueue);
+ rrddim_set_by_pointer(st_ipv4_tcpofo, rd_dropped, tcpext_TCPOFODrop);
+ rrddim_set_by_pointer(st_ipv4_tcpofo, rd_merged, tcpext_TCPOFOMerge);
+ rrddim_set_by_pointer(st_ipv4_tcpofo, rd_pruned, tcpext_OfoPruned);
+
+ rrdset_done(st_ipv4_tcpofo);
}
// --------------------------------------------------------------------
if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) {
do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpsyncookies");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("ipv4", "tcpsyncookies", NULL, "tcp", NULL, "TCP SYN Cookies"
- , "packets/s", 3100, update_every, RRDSET_TYPE_LINE);
-
- rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ static RRDSET *st_syncookies = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_syncookies)) {
+
+ st_syncookies = rrdset_create_localhost(
+ "ipv4"
+ , "tcpsyncookies"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Cookies"
+ , "packets/s"
+ , 3100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st_syncookies);
+
+ rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv);
+ rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent);
+ rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed);
- rrddim_set(st, "SyncookiesRecv", tcpext_SyncookiesRecv);
- rrddim_set(st, "SyncookiesSent", tcpext_SyncookiesSent);
- rrddim_set(st, "SyncookiesFailed", tcpext_SyncookiesFailed);
- rrdset_done(st);
+ rrdset_done(st_syncookies);
}
}
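[editor's note] Every chart in proc_net_netstat.c receives the same mechanical conversion above: the per-iteration rrdset_find_localhost() and by-name rrddim_set() lookups become function-local static pointers resolved once. The pattern, condensed around a hypothetical chart:

    static RRDSET *st = NULL;
    static RRDDIM *rd = NULL;

    if(unlikely(!st)) {
        st = rrdset_create_localhost("ipv4", "example", NULL, "family", NULL,
                                     "Example", "units/s", 1000, update_every,
                                     RRDSET_TYPE_LINE);
        rd = rrddim_add(st, "Value", "value", 1, 1, RRD_ALGORITHM_INCREMENTAL);
    }
    else
        rrdset_next(st);

    rrddim_set_by_pointer(st, rd, collected_value);  /* no name/hash lookup per sample */
    rrdset_done(st);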
diff --git a/src/proc_net_snmp.c b/src/proc_net_snmp.c
index 7c0fd9b4a..817c964b4 100644
--- a/src/proc_net_snmp.c
+++ b/src/proc_net_snmp.c
@@ -554,7 +554,7 @@ int do_proc_net_snmp(int update_every, usec_t dt) {
st = rrdset_find_localhost(RRD_TYPE_NET_SNMP ".icmpmsg");
if(!st) {
- st = rrdset_create_localhost(RRD_TYPE_NET_SNMP, "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messsages"
+ st = rrdset_create_localhost(RRD_TYPE_NET_SNMP, "icmpmsg", NULL, "icmp", NULL, "IPv4 ICMP Messages"
, "packets/s", 2604, update_every, RRDSET_TYPE_LINE);
for(i = 0; icmpmsg_data[i].name ;i++)
diff --git a/src/proc_net_snmp6.c b/src/proc_net_snmp6.c
index aa9ab2209..6649b7afe 100644
--- a/src/proc_net_snmp6.c
+++ b/src/proc_net_snmp6.c
@@ -283,8 +283,8 @@ int do_proc_net_snmp6(int update_every, usec_t dt) {
st = rrdset_create_localhost("system", "ipv6", NULL, "network", NULL, "IPv6 Bandwidth", "kilobits/s", 500
, update_every, RRDSET_TYPE_AREA);
- rrddim_add(st, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
@@ -536,8 +536,8 @@ int do_proc_net_snmp6(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA);
rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- rrddim_add(st, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
@@ -557,8 +557,8 @@ int do_proc_net_snmp6(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA);
rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- rrddim_add(st, "received", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
diff --git a/src/proc_softirqs.c b/src/proc_softirqs.c
index 560e2acb2..352407a4f 100644
--- a/src/proc_softirqs.c
+++ b/src/proc_softirqs.c
@@ -124,77 +124,120 @@ int do_proc_softirqs(int update_every, usec_t dt) {
irr->used = 1;
}
- RRDSET *st;
-
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("system", "softirqs");
- if(unlikely(!st)) st = rrdset_create_localhost("system", "softirqs", NULL, "softirqs", NULL, "System softirqs"
- , "softirqs/s", 950, update_every, RRDSET_TYPE_STACKED);
- else rrdset_next(st);
+ static RRDSET *st_system_softirqs = NULL;
+ if(unlikely(!st_system_softirqs))
+ st_system_softirqs = rrdset_create_localhost(
+ "system"
+ , "softirqs"
+ , NULL
+ , "softirqs"
+ , NULL
+ , "System softirqs"
+ , "softirqs/s"
+ , 950
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_softirqs);
for(l = 0; l < lines ;l++) {
struct interrupt *irr = irrindex(irrs, l, cpus);
+
if(unlikely(!irr->used)) continue;
+
// some interrupt may have changed without changing the total number of lines
// if the same number of interrupts have been added and removed between two
// calls of this function.
if(unlikely(!irr->rd || strncmp(irr->name, irr->rd->name, MAX_INTERRUPT_NAME) != 0)) {
- irr->rd = rrddim_find(st, irr->id);
+ irr->rd = rrddim_find(st_system_softirqs, irr->id);
+
if(unlikely(!irr->rd))
- irr->rd = rrddim_add(st, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
else
- rrddim_set_name(st, irr->rd, irr->name);
+ rrddim_set_name(st_system_softirqs, irr->rd, irr->name);
// also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
if(likely(do_per_core)) {
int c;
- for (c = 0; c < cpus ;c++)
- irr->cpu[c].rd = NULL;
+ for (c = 0; c < cpus ;c++) irr->cpu[c].rd = NULL;
}
}
- rrddim_set_by_pointer(st, irr->rd, irr->total);
+
+ rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total);
}
- rrdset_done(st);
+
+ rrdset_done(st_system_softirqs);
+
+ // --------------------------------------------------------------------
if(do_per_core) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
int c;
for(c = 0; c < cpus ; c++) {
- char id[50+1];
- snprintfz(id, 50, "cpu%d_softirqs", c);
-
- st = rrdset_find_bytype_localhost("cpu", id);
- if(unlikely(!st)) {
- // find if everything is zero
- unsigned long long core_sum = 0 ;
- for(l = 0; l < lines ;l++) {
+ if(unlikely(!core_st[c])) {
+ // find if everything is just zero
+ unsigned long long core_sum = 0;
+
+ for (l = 0; l < lines; l++) {
struct interrupt *irr = irrindex(irrs, l, cpus);
- if(unlikely(!irr->used)) continue;
+ if (unlikely(!irr->used)) continue;
core_sum += irr->cpu[c].value;
}
- if(unlikely(core_sum == 0)) continue; // try next core
- char title[100+1];
+ if (unlikely(core_sum == 0)) continue; // try next core
+
+ char id[50 + 1];
+ snprintfz(id, 50, "cpu%d_softirqs", c);
+
+ char title[100 + 1];
snprintfz(title, 100, "CPU%d softirqs", c);
- st = rrdset_create_localhost("cpu", id, NULL, "softirqs", "cpu.softirqs", title, "softirqs/s", 3000 + c
- , update_every, RRDSET_TYPE_STACKED);
+
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "softirqs"
+ , "cpu.softirqs"
+ , title
+ , "softirqs/s"
+ , 3000 + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
}
- else rrdset_next(st);
+ else
+ rrdset_next(core_st[c]);
for(l = 0; l < lines ;l++) {
struct interrupt *irr = irrindex(irrs, l, cpus);
+
if(unlikely(!irr->used)) continue;
+
if(unlikely(!irr->cpu[c].rd)) {
- irr->cpu[c].rd = rrddim_find(st, irr->id);
+ irr->cpu[c].rd = rrddim_find(core_st[c], irr->id);
+
if(unlikely(!irr->cpu[c].rd))
- irr->cpu[c].rd = rrddim_add(st, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
else
- rrddim_set_name(st, irr->cpu[c].rd, irr->name);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
}
- rrddim_set_by_pointer(st, irr->cpu[c].rd, irr->cpu[c].value);
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
}
- rrdset_done(st);
+
+ rrdset_done(core_st[c]);
}
}
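[editor's note] The per-core branch above caches one RRDSET pointer per CPU in a static array that only ever grows; zeroing just the newly appended slots preserves existing chart pointers if the CPU count rises, and a NULL slot doubles as "chart not created yet" (e.g. while a core's softirq sum is still zero). The grow-and-zero step in isolation, assuming reallocz() is netdata's abort-on-failure realloc:

    static RRDSET **core_st = NULL;
    static int old_cpus = 0;

    if(old_cpus < cpus) {
        core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);   /* grow only */
        memset(&core_st[old_cpus], 0,
               sizeof(RRDSET *) * (cpus - old_cpus));           /* zero the new tail */
        old_cpus = cpus;
    }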
diff --git a/src/proc_stat.c b/src/proc_stat.c
index 04f0896cd..40bf5cfad 100644
--- a/src/proc_stat.c
+++ b/src/proc_stat.c
@@ -1,25 +1,171 @@
#include "common.h"
+struct per_core_single_number_file {
+ char found:1;
+ const char *filename;
+ int fd;
+ collected_number value;
+ RRDDIM *rd;
+};
+
+#define CORE_THROTTLE_COUNT_INDEX 0
+#define PACKAGE_THROTTLE_COUNT_INDEX 1
+#define SCALING_CUR_FREQ_INDEX 2
+#define PER_CORE_FILES 3
+
+struct cpu_chart {
+ const char *id;
+
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_nice;
+ RRDDIM *rd_system;
+ RRDDIM *rd_idle;
+ RRDDIM *rd_iowait;
+ RRDDIM *rd_irq;
+ RRDDIM *rd_softirq;
+ RRDDIM *rd_steal;
+ RRDDIM *rd_guest;
+ RRDDIM *rd_guest_nice;
+
+ struct per_core_single_number_file files[PER_CORE_FILES];
+};
+
+static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+
+static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
+ char buf[50 + 1];
+ size_t x, files_read = 0, files_nonzero = 0;
+
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ f->found = 0;
+
+ if(unlikely(!f->filename))
+ continue;
+
+ if(unlikely(f->fd == -1)) {
+ f->fd = open(f->filename, O_RDONLY);
+ if (unlikely(f->fd == -1)) {
+ error("Cannot open file '%s'", f->filename);
+ continue;
+ }
+ }
+
+ ssize_t ret = read(f->fd, buf, 50);
+ if(unlikely(ret == -1)) {
+ // cannot read that file
+
+ error("Cannot read file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ continue;
+ }
+ else {
+ // successful read
+
+ // terminate the buffer
+ buf[ret] = '\0';
+
+ if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
+ close(f->fd);
+ f->fd = -1;
+ }
+ else if(lseek(f->fd, 0, SEEK_SET) == -1) {
+ error("Cannot seek in file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ }
+ }
+
+ files_read++;
+ f->found = 1;
+
+ f->value = str2ll(buf, NULL);
+ // info("read '%s', parsed as " COLLECTED_NUMBER_FORMAT, buf, f->value);
+ if(likely(f->value != 0))
+ files_nonzero++;
+ }
+
+ if(files_read == 0)
+ return -1;
+
+ if(files_nonzero == 0)
+ return 0;
+
+ return (int)files_nonzero;
+}
+
+static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) {
+ size_t x;
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ if(unlikely(!f->found))
+ continue;
+
+ if(unlikely(!f->rd))
+ f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm);
+
+ rrddim_set_by_pointer(st, f->rd, f->value);
+ }
+}
+
int do_proc_stat(int update_every, usec_t dt) {
(void)dt;
+ static struct cpu_chart *all_cpu_charts = NULL;
+ static size_t all_cpu_charts_size = 0;
static procfile *ff = NULL;
- static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1;
+ static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_scaling_cur_freq = -1;
static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
+ static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL;
if(unlikely(do_cpu == -1)) {
- do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", 1);
- do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", 1);
- do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", 1);
- do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", 1);
- do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", 1);
- do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", 1);
+ do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES);
+ do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES);
+ do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES);
+ do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES);
+ do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES);
+ do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES);
+
+ // give sane defaults based on the number of processors
+ if(processors > 50) {
+ // the system has too many processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_NO;
+ do_core_throttle_count = CONFIG_BOOLEAN_NO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // the system has a reasonable number of processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+ do_core_throttle_count = CONFIG_BOOLEAN_AUTO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_NO;
+ }
+
+ keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open);
+ do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count);
+ do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count);
+ do_scaling_cur_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "scaling_cur_freq", do_scaling_cur_freq);
hash_intr = simple_hash("intr");
hash_ctxt = simple_hash("ctxt");
hash_processes = simple_hash("processes");
hash_procs_running = simple_hash("procs_running");
hash_procs_blocked = simple_hash("procs_blocked");
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count");
+ core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count");
+ package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq");
+ scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename);
}
if(unlikely(!ff)) {
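[editor's note] On the defaults chosen above: with all three per-core files present and kept open, the plugin holds PER_CORE_FILES descriptors per core, which is presumably why systems past 50 processors default to closed fds and disabled throttle/frequency charts. Rough arithmetic under that assumption:

    /* rough fd budget, assuming all PER_CORE_FILES (3) files exist per core:
     *   fds = processors * PER_CORE_FILES
     * e.g. 64 cores -> 192 long-lived descriptors for the /proc/stat extras
     * alone, hence keep_per_core_fds_open defaults to NO above 50 processors. */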
@@ -36,7 +182,6 @@ int do_proc_stat(int update_every, usec_t dt) {
size_t words;
unsigned long long processes = 0, running = 0 , blocked = 0;
- RRDSET *st;
for(l = 0; l < lines ;l++) {
char *row_key = procfile_lineword(ff, l, 0);
@@ -50,120 +195,189 @@ int do_proc_stat(int update_every, usec_t dt) {
continue;
}
- char *id;
- unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
-
- id = row_key;
- user = str2ull(procfile_lineword(ff, l, 1));
- nice = str2ull(procfile_lineword(ff, l, 2));
- system = str2ull(procfile_lineword(ff, l, 3));
- idle = str2ull(procfile_lineword(ff, l, 4));
- iowait = str2ull(procfile_lineword(ff, l, 5));
- irq = str2ull(procfile_lineword(ff, l, 6));
- softirq = str2ull(procfile_lineword(ff, l, 7));
- steal = str2ull(procfile_lineword(ff, l, 8));
-
- guest = str2ull(procfile_lineword(ff, l, 9));
- user -= guest;
-
- guest_nice = str2ull(procfile_lineword(ff, l, 10));
- nice -= guest_nice;
-
- char *title, *type, *context, *family;
- long priority;
- int isthistotal;
-
- if(unlikely(strcmp(id, "cpu")) == 0) {
- title = "Total CPU utilization";
- type = "system";
- context = "system.cpu";
- family = id;
- priority = 100;
- isthistotal = 1;
- }
- else {
- title = "Core utilization";
- type = "cpu";
- context = "cpu.cpu";
- family = "utilization";
- priority = 1000;
- isthistotal = 0;
- }
+ size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1;
+
+ if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) {
+ char *id;
+ unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
- if(likely((isthistotal && do_cpu) || (!isthistotal && do_cpu_cores))) {
- st = rrdset_find_bytype_localhost(type, id);
- if(unlikely(!st)) {
- st = rrdset_create_localhost(type, id, NULL, family, context, title, "percentage", priority
- , update_every, RRDSET_TYPE_STACKED);
+ id = row_key;
+ user = str2ull(procfile_lineword(ff, l, 1));
+ nice = str2ull(procfile_lineword(ff, l, 2));
+ system = str2ull(procfile_lineword(ff, l, 3));
+ idle = str2ull(procfile_lineword(ff, l, 4));
+ iowait = str2ull(procfile_lineword(ff, l, 5));
+ irq = str2ull(procfile_lineword(ff, l, 6));
+ softirq = str2ull(procfile_lineword(ff, l, 7));
+ steal = str2ull(procfile_lineword(ff, l, 8));
+
+ guest = str2ull(procfile_lineword(ff, l, 9));
+ user -= guest;
+
+ guest_nice = str2ull(procfile_lineword(ff, l, 10));
+ nice -= guest_nice;
+
+ char *title, *type, *context, *family;
+ long priority;
+
+ if(core >= all_cpu_charts_size) {
+ size_t old_cpu_charts_size = all_cpu_charts_size;
+ all_cpu_charts_size = core + 1;
+ all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size);
+ memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size));
+ }
+ struct cpu_chart *cpu_chart = &all_cpu_charts[core];
+
+ if(unlikely(!cpu_chart->st)) {
+ cpu_chart->id = strdupz(id);
+
+ if(core == 0) {
+ title = "Total CPU utilization";
+ type = "system";
+ context = "system.cpu";
+ family = id;
+ priority = 100;
+ }
+ else {
+ title = "Core utilization";
+ type = "cpu";
+ context = "cpu.cpu";
+ family = "utilization";
+ priority = 1000;
+
+ // FIXME: check for /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq
+ // FIXME: check for /sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state
+
+ char filename[FILENAME_MAX + 1];
+ struct stat stbuf;
+
+ if(do_core_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_package_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_scaling_cur_freq != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[SCALING_CUR_FREQ_INDEX].filename = strdupz(filename);
+ cpu_chart->files[SCALING_CUR_FREQ_INDEX].fd = -1;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_YES;
+ }
+ }
+ }
+
+ cpu_chart->st = rrdset_create_localhost(
+ type
+ , id
+ , NULL
+ , family
+ , context
+ , title
+ , "percentage"
+ , priority
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
long multiplier = 1;
long divisor = 1; // sysconf(_SC_CLK_TCK);
- rrddim_add(st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
-
- rrddim_add(st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(st, "idle");
+ cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(cpu_chart->st, "idle");
}
- else rrdset_next(st);
-
- rrddim_set(st, "user", user);
- rrddim_set(st, "nice", nice);
- rrddim_set(st, "system", system);
- rrddim_set(st, "idle", idle);
- rrddim_set(st, "iowait", iowait);
- rrddim_set(st, "irq", irq);
- rrddim_set(st, "softirq", softirq);
- rrddim_set(st, "steal", steal);
- rrddim_set(st, "guest", guest);
- rrddim_set(st, "guest_nice", guest_nice);
- rrdset_done(st);
+ else rrdset_next(cpu_chart->st);
+
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice);
+ rrdset_done(cpu_chart->st);
}
}
else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) {
- unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
-
- // --------------------------------------------------------------------
-
if(likely(do_interrupts)) {
- st = rrdset_find_bytype_localhost("system", "intr");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "intr", NULL, "interrupts", NULL, "CPU Interrupts"
- , "interrupts/s", 900, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_intr = NULL;
+ static RRDDIM *rd_interrupts = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_intr)) {
+ st_intr = rrdset_create_localhost(
+ "system"
+ , "intr"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "CPU Interrupts"
+ , "interrupts/s"
+ , 900
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
+
+ rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else rrdset_next(st_intr);
- rrddim_set(st, "interrupts", value);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_intr, rd_interrupts, value);
+ rrdset_done(st_intr);
}
}
else if(unlikely(hash == hash_ctxt && strcmp(row_key, "ctxt") == 0)) {
- unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
-
- // --------------------------------------------------------------------
-
if(likely(do_context)) {
- st = rrdset_find_bytype_localhost("system", "ctxt");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "ctxt", NULL, "processes", NULL, "CPU Context Switches"
- , "context switches/s", 800, update_every, RRDSET_TYPE_LINE);
-
- rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_ctxt = NULL;
+ static RRDDIM *rd_switches = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_ctxt)) {
+ st_ctxt = rrdset_create_localhost(
+ "system"
+ , "ctxt"
+ , NULL
+ , "processes"
+ , NULL
+ , "CPU Context Switches"
+ , "context switches/s"
+ , 800
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else rrdset_next(st_ctxt);
- rrddim_set(st, "switches", value);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_ctxt, rd_switches, value);
+ rrdset_done(st_ctxt);
}
}
else if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) {
@@ -180,36 +394,147 @@ int do_proc_stat(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if(likely(do_forks)) {
- st = rrdset_find_bytype_localhost("system", "forks");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "forks", NULL, "processes", NULL, "Started Processes", "processes/s"
- , 700, update_every, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ static RRDSET *st_forks = NULL;
+ static RRDDIM *rd_started = NULL;
+
+ if(unlikely(!st_forks)) {
+ st_forks = rrdset_create_localhost(
+ "system"
+ , "forks"
+ , NULL
+ , "processes"
+ , NULL
+ , "Started Processes"
+ , "processes/s"
+ , 700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL);
+
+ rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
- else rrdset_next(st);
+ else rrdset_next(st_forks);
- rrddim_set(st, "started", processes);
- rrdset_done(st);
+ rrddim_set_by_pointer(st_forks, rd_started, processes);
+ rrdset_done(st_forks);
}
// --------------------------------------------------------------------
if(likely(do_processes)) {
- st = rrdset_find_bytype_localhost("system", "processes");
- if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "processes", NULL, "processes", NULL, "System Processes", "processes"
- , 600, update_every, RRDSET_TYPE_LINE);
+ static RRDSET *st_processes = NULL;
+ static RRDDIM *rd_running = NULL;
+ static RRDDIM *rd_blocked = NULL;
+
+ if(unlikely(!st_processes)) {
+ st_processes = rrdset_create_localhost(
+ "system"
+ , "processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Processes"
+ , "processes"
+ , 600
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_processes);
+
+ rrddim_set_by_pointer(st_processes, rd_running, running);
+ rrddim_set_by_pointer(st_processes, rd_blocked, blocked);
+ rrdset_done(st_processes);
+ }
- rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if(likely(all_cpu_charts_size > 1)) {
+ if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_core_throttle_count = NULL;
+
+ if (unlikely(!st_core_throttle_count))
+ st_core_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "core_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.core_throttling"
+ , "Core Thermal Throttling Events"
+ , "events/s"
+ , 5001
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_core_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_core_throttle_count);
+ }
}
- else rrdset_next(st);
- rrddim_set(st, "running", running);
- rrddim_set(st, "blocked", blocked);
- rrdset_done(st);
+ if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_package_throttle_count = NULL;
+
+ if(unlikely(!st_package_throttle_count))
+ st_package_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "package_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.package_throttling"
+ , "Package Thermal Throttling Events"
+ , "events/s"
+ , 5002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_package_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_package_throttle_count);
+ }
+ }
+
+ if(likely(do_scaling_cur_freq != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX);
+ if(likely(r != -1 && (do_scaling_cur_freq == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_scaling_cur_freq = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_scaling_cur_freq = NULL;
+
+ if(unlikely(!st_scaling_cur_freq))
+ st_scaling_cur_freq = rrdset_create_localhost(
+ "cpu"
+ , "scaling_cur_freq"
+ , NULL
+ , "cpufreq"
+ , "cpu.scaling_cur_freq"
+ , "Per CPU Core, Current CPU Scaling Frequency"
+ , "MHz"
+ , 5003
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_scaling_cur_freq);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrdset_done(st_scaling_cur_freq);
+ }
+ }
}
return 0;
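
The change repeated throughout this commit's collectors is visible here in full: the RRDSET and RRDDIM handles move into function-local statics, created once on the first pass, and later passes update dimensions through rrddim_set_by_pointer() instead of rrddim_set(), which has to hash and look up the dimension id on every collection. A minimal sketch of the idiom, with illustrative chart and dimension names (the types and calls are the netdata internals used above):

    #include "common.h"  // netdata internal: RRDSET, RRDDIM, unlikely(), rrd* API

    int do_example_metric(int update_every, unsigned long long value) {
        // created once on the first call, then reused for the lifetime of the process
        static RRDSET *st = NULL;
        static RRDDIM *rd = NULL;

        if(unlikely(!st)) {
            st = rrdset_create_localhost(
                    "example", "metric", NULL, "example", NULL,
                    "Example Metric", "events/s", 1000,
                    update_every, RRDSET_TYPE_LINE);
            rd = rrddim_add(st, "value", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
        }
        else rrdset_next(st);

        // O(1) pointer dereference instead of a per-iteration lookup by id
        rrddim_set_by_pointer(st, rd, value);
        rrdset_done(st);
        return 0;
    }
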
diff --git a/src/proc_sys_kernel_random_entropy_avail.c b/src/proc_sys_kernel_random_entropy_avail.c
index fea8900d3..267ea271a 100644
--- a/src/proc_sys_kernel_random_entropy_avail.c
+++ b/src/proc_sys_kernel_random_entropy_avail.c
@@ -17,15 +17,28 @@ int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) {
unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0));
- RRDSET *st = rrdset_find_bytype_localhost("system", "entropy");
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "entropy", NULL, "entropy", NULL, "Available Entropy", "entropy", 1000
- , update_every, RRDSET_TYPE_LINE);
- rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ st = rrdset_create_localhost(
+ "system"
+ , "entropy"
+ , NULL
+ , "entropy"
+ , NULL
+ , "Available Entropy"
+ , "entropy"
+ , 1000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
else rrdset_next(st);
- rrddim_set(st, "entropy", entropy);
+ rrddim_set_by_pointer(st, rd, entropy);
rrdset_done(st);
return 0;
diff --git a/src/proc_uptime.c b/src/proc_uptime.c
index f74cccb97..fb05b5cb3 100644
--- a/src/proc_uptime.c
+++ b/src/proc_uptime.c
@@ -3,7 +3,6 @@
int do_proc_uptime(int update_every, usec_t dt) {
(void)dt;
- static RRDSET *st = NULL;
collected_number uptime = 0;
#ifdef CLOCK_BOOTTIME_IS_AVAILABLE
@@ -38,17 +37,31 @@ int do_proc_uptime(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- if(unlikely(!st))
- st = rrdset_find_localhost("system.uptime");
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "uptime", NULL, "uptime", NULL, "System Uptime", "seconds", 1000
- , update_every, RRDSET_TYPE_LINE);
- rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , NULL
+ , "System Uptime"
+ , "seconds"
+ , 1000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
}
- else rrdset_next(st);
+ else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, uptime);
- rrddim_set(st, "uptime", uptime);
rrdset_done(st);
return 0;
diff --git a/src/proc_vmstat.c b/src/proc_vmstat.c
index a2416313a..2382116f1 100644
--- a/src/proc_vmstat.c
+++ b/src/proc_vmstat.c
@@ -91,17 +91,29 @@ int do_proc_vmstat(int update_every, usec_t dt) {
do_swapio = CONFIG_BOOLEAN_YES;
static RRDSET *st_swapio = NULL;
- if(unlikely(!st_swapio)) {
- st_swapio = rrdset_create_localhost("system", "swapio", NULL, "swap", NULL, "Swap I/O", "kilobytes/s", 250
- , update_every, RRDSET_TYPE_AREA);
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
- rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ if(unlikely(!st_swapio)) {
+ st_swapio = rrdset_create_localhost(
+ "system"
+ , "swapio"
+ , NULL
+ , "swap"
+ , NULL
+ , "Swap I/O"
+ , "kilobytes/s"
+ , 250
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st_swapio);
- rrddim_set(st_swapio, "in", pswpin);
- rrddim_set(st_swapio, "out", pswpout);
+ rrddim_set_by_pointer(st_swapio, rd_in, pswpin);
+ rrddim_set_by_pointer(st_swapio, rd_out, pswpout);
rrdset_done(st_swapio);
}
@@ -109,17 +121,29 @@ int do_proc_vmstat(int update_every, usec_t dt) {
if(do_io) {
static RRDSET *st_io = NULL;
- if(unlikely(!st_io)) {
- st_io = rrdset_create_localhost("system", "io", NULL, "disk", NULL, "Disk I/O", "kilobytes/s", 150
- , update_every, RRDSET_TYPE_AREA);
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
- rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if(unlikely(!st_io)) {
+ st_io = rrdset_create_localhost(
+ "system"
+ , "io"
+ , NULL
+ , "disk"
+ , NULL
+ , "Disk I/O"
+ , "kilobytes/s"
+ , 150
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st_io);
- rrddim_set(st_io, "in", pgpgin);
- rrddim_set(st_io, "out", pgpgout);
+ rrddim_set_by_pointer(st_io, rd_in, pgpgin);
+ rrddim_set_by_pointer(st_io, rd_out, pgpgout);
rrdset_done(st_io);
}
@@ -127,18 +151,31 @@ int do_proc_vmstat(int update_every, usec_t dt) {
if(do_pgfaults) {
static RRDSET *st_pgfaults = NULL;
+ static RRDDIM *rd_minor = NULL, *rd_major = NULL;
+
if(unlikely(!st_pgfaults)) {
- st_pgfaults = rrdset_create_localhost("mem", "pgfaults", NULL, "system", NULL, "Memory Page Faults"
- , "page faults/s", 500, update_every, RRDSET_TYPE_LINE);
+ st_pgfaults = rrdset_create_localhost(
+ "mem"
+ , "pgfaults"
+ , NULL
+ , "system"
+ , NULL
+ , "Memory Page Faults"
+ , "page faults/s"
+ , 500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);
- rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st_pgfaults);
- rrddim_set(st_pgfaults, "minor", pgfault);
- rrddim_set(st_pgfaults, "major", pgmajfault);
+ rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault);
+ rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault);
rrdset_done(st_pgfaults);
}
@@ -149,6 +186,7 @@ int do_proc_vmstat(int update_every, usec_t dt) {
// single-node systems have uninteresting statistics (since all accesses
// are local).
if(unlikely(has_numa == -1))
+
has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates ||
numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 1 : 0;
@@ -156,37 +194,50 @@ int do_proc_vmstat(int update_every, usec_t dt) {
do_numa = CONFIG_BOOLEAN_YES;
static RRDSET *st_numa = NULL;
+ static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL;
+
if(unlikely(!st_numa)) {
- st_numa = rrdset_create_localhost("mem", "numa", NULL, "numa", NULL, "NUMA events", "events/s", 800
- , update_every, RRDSET_TYPE_LINE);
+ st_numa = rrdset_create_localhost(
+ "mem"
+ , "numa"
+ , NULL
+ , "numa"
+ , NULL
+ , "NUMA events"
+ , "events/s"
+ , 800
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);
// These depend on CONFIG_NUMA in the kernel.
- rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
// The following stats depend on CONFIG_NUMA_BALANCING in the
// kernel.
- rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st_numa);
- rrddim_set(st_numa, "local", numa_local);
- rrddim_set(st_numa, "foreign", numa_foreign);
- rrddim_set(st_numa, "interleave", numa_interleave);
- rrddim_set(st_numa, "other", numa_other);
+ rrddim_set_by_pointer(st_numa, rd_local, numa_local);
+ rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign);
+ rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave);
+ rrddim_set_by_pointer(st_numa, rd_other, numa_other);
- rrddim_set(st_numa, "pte_updates", numa_pte_updates);
- rrddim_set(st_numa, "huge_pte_updates", numa_huge_pte_updates);
- rrddim_set(st_numa, "hint_faults", numa_hint_faults);
- rrddim_set(st_numa, "hint_faults_local", numa_hint_faults_local);
- rrddim_set(st_numa, "pages_migrated", numa_pages_migrated);
+ rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local);
+ rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated);
rrdset_done(st_numa);
}
diff --git a/src/rrd.h b/src/rrd.h
index 5bc61dcb8..1ebb89e5a 100644
--- a/src/rrd.h
+++ b/src/rrd.h
@@ -462,6 +462,9 @@ struct rrdhost {
RRDCALC *alarms;
ALARM_LOG health_log; // alarms historical events (event log)
+ uint32_t health_last_processed_id; // the last processed health id from the log
+ uint32_t health_max_unique_id; // the max alarm log unique id given for the host
+ uint32_t health_max_alarm_id; // the max alarm id given for the host
// templates of alarms
// these are used to create alarms when charts
diff --git a/src/rrd2json.c b/src/rrd2json.c
index 98080139c..84d904485 100644
--- a/src/rrd2json.c
+++ b/src/rrd2json.c
@@ -213,14 +213,14 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units);
else {
if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
- n = roundl(n);
+ n = calculated_number_round(n);
if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n;
buffer_sprintf(wb, "NETDATA_%s_%s=\"%0.0Lf\" # %s\n", chart, dimension, n, st->units);
}
}
}
- total = roundl(total);
+ total = calculated_number_round(total);
buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"%0.0Lf\" # %s\n", chart, total, st->units);
rrdset_unlock(st);
}
@@ -243,7 +243,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
if(isnan(n) || isinf(n))
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units);
else {
- n = roundl(n);
+ n = calculated_number_round(n);
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"%0.0Lf\" # %s\n", chart, alarm, n, rc->units);
}
@@ -1573,13 +1573,13 @@ RRDR *rrd2rrdr(RRDSET *st, long points, long long after, long long before, int g
switch(group_method) {
case GROUP_MIN:
if(unlikely(isnan(group_values[c])) ||
- fabsl(value) < fabsl(group_values[c]))
+ calculated_number_fabs(value) < calculated_number_fabs(group_values[c]))
group_values[c] = value;
break;
case GROUP_MAX:
if(unlikely(isnan(group_values[c])) ||
- fabsl(value) > fabsl(group_values[c]))
+ calculated_number_fabs(value) > calculated_number_fabs(group_values[c]))
group_values[c] = value;
break;
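
These call sites stop using roundl()/fabsl() directly and go through the calculated_number_* macros added to src/storage_number.h further down in this diff, so the precision of calculated_number can be changed in a single header. A sketch of the indirection as it stands, assuming calculated_number remains long double:

    #include <math.h>

    typedef long double calculated_number;

    // single switch point for the floating type used by all rrd math
    #define calculated_number_round(x) roundl(x)
    #define calculated_number_fabs(x)  fabsl(x)

    // GROUP_MIN above keeps the value closest to zero, comparing magnitudes only
    static calculated_number group_min(calculated_number cur, calculated_number candidate) {
        if(isnan(cur) || calculated_number_fabs(candidate) < calculated_number_fabs(cur))
            return candidate;
        return cur;
    }
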
diff --git a/src/rrddim.c b/src/rrddim.c
index e75aa3fd2..8df548397 100644
--- a/src/rrddim.c
+++ b/src/rrddim.c
@@ -167,11 +167,11 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte
}
if(rd->multiplier != multiplier) {
- info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT ". Previous values may be wrong.", fullfilename, multiplier, rd->multiplier);
+ info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, multiplier, rd->multiplier);
}
if(rd->divisor != divisor) {
- info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT ". Previous values may be wrong.", fullfilename, divisor, rd->divisor);
+ info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT "). Previous values may be wrong.", fullfilename, divisor, rd->divisor);
}
}
}
diff --git a/src/rrdpush.c b/src/rrdpush.c
index 6def90fe5..c1d052fd8 100644
--- a/src/rrdpush.c
+++ b/src/rrdpush.c
@@ -113,12 +113,12 @@ static inline void send_chart_definition(RRDSET *st) {
// sends the current chart dimensions
static inline void send_chart_metrics(RRDSET *st) {
- buffer_sprintf(st->rrdhost->rrdpush_buffer, "BEGIN %s %llu\n", st->id, (st->upstream_resync_time > st->last_collected_time.tv_sec)?st->usec_since_last_update:0);
+ buffer_sprintf(st->rrdhost->rrdpush_buffer, "BEGIN \"%s\" %llu\n", st->id, (st->upstream_resync_time > st->last_collected_time.tv_sec)?st->usec_since_last_update:0);
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->updated && rd->exposed)
- buffer_sprintf(st->rrdhost->rrdpush_buffer, "SET %s = " COLLECTED_NUMBER_FORMAT "\n"
+ buffer_sprintf(st->rrdhost->rrdpush_buffer, "SET \"%s\" = " COLLECTED_NUMBER_FORMAT "\n"
, rd->id
, rd->collected_value
);
@@ -380,7 +380,7 @@ void *rrdpush_sender_thread(void *ptr) {
// allow appending data into rrdpush_buffer
host->rrdpush_connected = 1;
- debug(D_STREAM, "Connected...");
+ debug(D_STREAM, "STREAM: Connected on fd %d...", host->rrdpush_socket);
}
ifd->fd = host->rrdpush_pipe[PIPE_READ];
@@ -389,13 +389,13 @@ void *rrdpush_sender_thread(void *ptr) {
ofd->fd = host->rrdpush_socket;
ofd->revents = 0;
- if(begin < buffer_strlen(host->rrdpush_buffer)) {
- debug(D_STREAM, "STREAM: Requesting data output on streaming socket...");
+ if(ofd->fd != -1 && begin < buffer_strlen(host->rrdpush_buffer)) {
+ debug(D_STREAM, "STREAM: Requesting data output on streaming socket %d...", ofd->fd);
ofd->events = POLLOUT;
fdmax = 2;
}
else {
- debug(D_STREAM, "STREAM: Not requesting data output on streaming socket (nothing to send now)...");
+ debug(D_STREAM, "STREAM: Not requesting data output on streaming socket %d (nothing to send now)...", ofd->fd);
ofd->events = 0;
fdmax = 1;
}
@@ -419,7 +419,7 @@ void *rrdpush_sender_thread(void *ptr) {
break;
}
else if(likely(retval)) {
- if (ifd->revents & POLLIN) {
+ if (ifd->revents & POLLIN || ifd->revents & POLLPRI) {
debug(D_STREAM, "STREAM: Data added to send buffer (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
char buffer[1000 + 1];
@@ -427,67 +427,98 @@ void *rrdpush_sender_thread(void *ptr) {
error("STREAM %s [send to %s]: cannot read from internal pipe.", host->hostname, connected_to);
}
- if (ofd->revents & POLLOUT && begin < buffer_strlen(host->rrdpush_buffer)) {
- debug(D_STREAM, "STREAM: Sending data (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
-
- // BEGIN RRDPUSH LOCKED SESSION
-
- // during this session, data collectors
- // will not be able to append data to our buffer
- // but the socket is in non-blocking mode
- // so, we will not block at send()
-
- if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0)
- error("STREAM %s [send]: cannot set pthread cancel state to DISABLE.", host->hostname);
-
- debug(D_STREAM, "STREAM: Getting exclusive lock on host...");
- rrdpush_lock(host);
-
- debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_buffer));
- ssize_t ret = send(host->rrdpush_socket, &host->rrdpush_buffer->buffer[begin], buffer_strlen(host->rrdpush_buffer) - begin, MSG_DONTWAIT);
- if (unlikely(ret == -1)) {
- if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) {
- debug(D_STREAM, "STREAM: Send failed - closing socket...");
- error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
- close(host->rrdpush_socket);
- host->rrdpush_socket = -1;
+ if (ofd->revents & POLLOUT) {
+ if (begin < buffer_strlen(host->rrdpush_buffer)) {
+ debug(D_STREAM, "STREAM: Sending data (current buffer length %zu bytes, begin = %zu)...", buffer_strlen(host->rrdpush_buffer), begin);
+
+ // BEGIN RRDPUSH LOCKED SESSION
+
+ // during this session, data collectors
+ // will not be able to append data to our buffer
+ // but the socket is in non-blocking mode
+ // so, we will not block at send()
+
+ if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0)
+ error("STREAM %s [send]: cannot set pthread cancel state to DISABLE.", host->hostname);
+
+ debug(D_STREAM, "STREAM: Getting exclusive lock on host...");
+ rrdpush_lock(host);
+
+ debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_buffer));
+ ssize_t ret = send(host->rrdpush_socket, &host->rrdpush_buffer->buffer[begin], buffer_strlen(host->rrdpush_buffer) - begin, MSG_DONTWAIT);
+ if (unlikely(ret == -1)) {
+ if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) {
+ debug(D_STREAM, "STREAM: Send failed - closing socket...");
+ error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
+ }
+ else {
+ debug(D_STREAM, "STREAM: Send failed - will retry...");
+ }
+ }
+ else if (likely(ret > 0)) {
+ // DEBUG - dump the string to see it
+ //char c = host->rrdpush_buffer->buffer[begin + ret];
+ //host->rrdpush_buffer->buffer[begin + ret] = '\0';
+ //debug(D_STREAM, "STREAM: sent from %zu to %zd:\n%s\n", begin, ret, &host->rrdpush_buffer->buffer[begin]);
+ //host->rrdpush_buffer->buffer[begin + ret] = c;
+
+ sent_connection += ret;
+ sent_bytes += ret;
+ begin += ret;
+
+ if (begin == buffer_strlen(host->rrdpush_buffer)) {
+ // we sent it all
+
+ debug(D_STREAM, "STREAM: Sent %zd bytes (the whole buffer)...", ret);
+ buffer_flush(host->rrdpush_buffer);
+ begin = 0;
+ }
+ else {
+ debug(D_STREAM, "STREAM: Sent %zd bytes (part of the data buffer)...", ret);
+ }
+
+ last_sent_t = now_monotonic_sec();
}
else {
- debug(D_STREAM, "STREAM: Send failed - will retry...");
+ debug(D_STREAM, "STREAM: send() returned %zd - closing the socket...", ret);
+ error("STREAM %s [send to %s]: failed to send metrics (send() returned %zd) - closing connection - we have sent %zu bytes on this connection.",
+ host->hostname, connected_to, ret, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
}
- }
- else if(likely(ret > 0)) {
- sent_connection += ret;
- sent_bytes += ret;
- begin += ret;
- if (begin == buffer_strlen(host->rrdpush_buffer)) {
- // we send it all
+ debug(D_STREAM, "STREAM: Releasing exclusive lock on host...");
+ rrdpush_unlock(host);
- debug(D_STREAM, "STREAM: Sent %zd bytes (the whole buffer)...", ret);
- buffer_flush(host->rrdpush_buffer);
- begin = 0;
- }
- else {
- debug(D_STREAM, "STREAM: Sent %zd bytes (part of the data buffer)...", ret);
- }
+ if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("STREAM %s [send]: cannot set pthread cancel state to ENABLE.", host->hostname);
- last_sent_t = now_monotonic_sec();
+ // END RRDPUSH LOCKED SESSION
}
else {
- debug(D_STREAM, "STREAM: send() returned %zd - closing the socket...", ret);
- error("STREAM %s [send to %s]: failed to send metrics (send() returned %zd) - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, ret, sent_connection);
- close(host->rrdpush_socket);
- host->rrdpush_socket = -1;
+ debug(D_STREAM, "STREAM: we have sent the entire buffer, but we received POLLOUT...");
}
+ }
- debug(D_STREAM, "STREAM: Releasing exclusive lock on host...");
- rrdpush_unlock(host);
-
- if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("STREAM %s [send]: cannot set pthread cancel state to ENABLE.", host->hostname);
-
- // END RRDPUSH LOCKED SESSION
+ if(unlikely(ofd->revents & POLLERR)) {
+ debug(D_STREAM, "STREAM: Send failed (POLLERR) - closing socket...");
+ error("STREAM %s [send to %s]: connection reports errors (POLLERR), closing it - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
+ }
+ else if(unlikely(ofd->revents & POLLHUP)) {
+ debug(D_STREAM, "STREAM: Send failed (POLLHUP) - closing socket...");
+ error("STREAM %s [send to %s]: connection closed by remote end (POLLHUP) - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
+ }
+ else if(unlikely(ofd->revents & POLLNVAL)) {
+ debug(D_STREAM, "STREAM: Send failed (POLLNVAL) - closing socket...");
+ error("STREAM %s [send to %s]: connection is invalid (POLLNVAL), closing it - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
}
}
else {
@@ -602,7 +633,7 @@ static int rrdpush_receive(int fd, const char *key, const char *hostname, const
, host->rrd_history_entries
, rrd_memory_mode_name(host->rrd_memory_mode)
, (health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto")
- , host->tags
+ , host->tags?host->tags:""
);
#endif // NETDATA_INTERNAL_CHECKS
@@ -648,8 +679,15 @@ static int rrdpush_receive(int fd, const char *key, const char *hostname, const
host->connected_senders++;
rrdhost_flag_clear(host, RRDHOST_ORPHAN);
- if(health_enabled != CONFIG_BOOLEAN_NO)
- host->health_delay_up_to = now_realtime_sec() + alarms_delay;
+ if(health_enabled != CONFIG_BOOLEAN_NO) {
+ if(alarms_delay > 0) {
+ host->health_delay_up_to = now_realtime_sec() + alarms_delay;
+ info("Postponing health checks for %ld seconds, on host '%s', because it was just connected."
+ , alarms_delay
+ , host->hostname
+ );
+ }
+ }
rrdhost_unlock(host);
// call the plugins.d processor to receive the metrics
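
The restructured sender loop only calls send() when poll() reports POLLOUT and the buffer actually holds pending bytes, and it now handles error revents explicitly instead of waiting for send() to fail. A condensed sketch of that dispatch, assuming the same non-blocking socket setup (error handling reduced to closing the fd, which is what the code above does even for POLLNVAL):

    #include <poll.h>
    #include <unistd.h>

    // returns 0 to keep the connection, -1 after giving it up
    static int handle_sender_revents(struct pollfd *ofd, int *sock, int have_pending_data) {
        if(ofd->revents & (POLLERR | POLLHUP | POLLNVAL)) {
            // remote error, hangup, or invalid fd: drop this connection
            close(*sock);
            *sock = -1;
            return -1;
        }

        if((ofd->revents & POLLOUT) && have_pending_data) {
            // safe to send() without blocking; partial writes advance a
            // 'begin' offset into the buffer, exactly as in the loop above
        }

        return 0;
    }
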
diff --git a/src/rrdset.c b/src/rrdset.c
index caa427ff6..c5168f02e 100644
--- a/src/rrdset.c
+++ b/src/rrdset.c
@@ -582,10 +582,15 @@ RRDSET *rrdset_create_custom(
st->chart_type = rrdset_type_id(config_get(st->config_section, "chart type", rrdset_type_name(chart_type)));
st->type = config_get(st->config_section, "type", type);
+
st->family = config_get(st->config_section, "family", family?family:st->type);
+ json_fix_string(st->family);
+
st->units = config_get(st->config_section, "units", units?units:"");
+ json_fix_string(st->units);
st->context = config_get(st->config_section, "context", context?context:st->id);
+ json_fix_string(st->context);
st->hash_context = simple_hash(st->context);
st->priority = config_get_number(st->config_section, "priority", priority);
diff --git a/src/signals.c b/src/signals.c
new file mode 100644
index 000000000..331e80358
--- /dev/null
+++ b/src/signals.c
@@ -0,0 +1,168 @@
+#include "common.h"
+
+typedef enum signal_action {
+ NETDATA_SIGNAL_END_OF_LIST,
+ NETDATA_SIGNAL_IGNORE,
+ NETDATA_SIGNAL_EXIT_CLEANLY,
+ NETDATA_SIGNAL_SAVE_DATABASE,
+ NETDATA_SIGNAL_LOG_ROTATE,
+ NETDATA_SIGNAL_RELOAD_HEALTH,
+ NETDATA_SIGNAL_FATAL,
+} SIGNAL_ACTION;
+
+static struct {
+ int signo; // the signal
+ const char *name; // the name of the signal
+ size_t count; // the number of signals received
+ SIGNAL_ACTION action; // the action to take
+} signals_waiting[] = {
+ { SIGPIPE, "SIGPIPE", 0, NETDATA_SIGNAL_IGNORE },
+ { SIGINT , "SIGINT", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGQUIT, "SIGQUIT", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGTERM, "SIGTERM", 0, NETDATA_SIGNAL_EXIT_CLEANLY },
+ { SIGHUP, "SIGHUP", 0, NETDATA_SIGNAL_LOG_ROTATE },
+ { SIGUSR1, "SIGUSR1", 0, NETDATA_SIGNAL_SAVE_DATABASE },
+ { SIGUSR2, "SIGUSR2", 0, NETDATA_SIGNAL_RELOAD_HEALTH },
+ { SIGBUS, "SIGBUS", 0, NETDATA_SIGNAL_FATAL },
+
+ // terminator
+ { 0, "NONE", 0, NETDATA_SIGNAL_END_OF_LIST }
+};
+
+static void signal_handler(int signo) {
+ // find the entry in the list
+ int i;
+ for(i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST ; i++) {
+ if(unlikely(signals_waiting[i].signo == signo)) {
+ signals_waiting[i].count++;
+
+ if(signals_waiting[i].action == NETDATA_SIGNAL_FATAL) {
+ char buffer[200 + 1];
+ snprintfz(buffer, 200, "\nSIGNAL HANDLER: received: %s. Oops! This is bad!\n", signals_waiting[i].name);
+ if(write(STDERR_FILENO, buffer, strlen(buffer)) == -1) {
+ // nothing to do - we cannot write, and there is no way to complain about it
+ ;
+ }
+ }
+
+ return;
+ }
+ }
+}
+
+void signals_block(void) {
+ sigset_t sigset;
+ sigfillset(&sigset);
+
+ if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1)
+ error("SIGNAL: Could not block signals for threads");
+}
+
+void signals_unblock(void) {
+ sigset_t sigset;
+ sigfillset(&sigset);
+
+ if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) {
+ error("SIGNAL: Could not unblock signals for threads");
+ }
+}
+
+void signals_init(void) {
+ // Catch signals which we want to use
+ struct sigaction sa;
+ sa.sa_flags = 0;
+
+ // ignore all signals while we run in a signal handler
+ sigfillset(&sa.sa_mask);
+
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ if(signals_waiting[i].action == NETDATA_SIGNAL_IGNORE)
+ sa.sa_handler = SIG_IGN;
+ else
+ sa.sa_handler = signal_handler;
+
+ if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+ error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name);
+ }
+}
+
+void signals_reset(void) {
+ struct sigaction sa;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_handler = SIG_DFL;
+ sa.sa_flags = 0;
+
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
+ error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
+ }
+}
+
+void signals_handle(void) {
+ while(1) {
+
+ // pause() causes the calling process (or thread) to sleep until a signal
+ // is delivered that either terminates the process or causes the invocation
+ // of a signal-catching function.
+ if(pause() == -1 && errno == EINTR) {
+
+ // loop once, but keep looping while signals are coming in
+ // this is needed because a few operations may take some time
+ // so we need to check for new signals before pausing again
+ int found = 1;
+ while(found) {
+ found = 0;
+
+ // execute the actions of the signals
+ int i;
+ for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
+ if (signals_waiting[i].count) {
+ found = 1;
+ signals_waiting[i].count = 0;
+ const char *name = signals_waiting[i].name;
+
+ switch (signals_waiting[i].action) {
+ case NETDATA_SIGNAL_RELOAD_HEALTH:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Reloading HEALTH configuration...", name);
+ health_reload();
+ error_log_limit_reset();
+ break;
+
+ case NETDATA_SIGNAL_SAVE_DATABASE:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Saving databases...", name);
+ rrdhost_save_all();
+ info("Databases saved.");
+ error_log_limit_reset();
+ break;
+
+ case NETDATA_SIGNAL_LOG_ROTATE:
+ error_log_limit_unlimited();
+ info("SIGNAL: Received %s. Reopening all log files...", name);
+ reopen_all_log_files();
+ error_log_limit_reset();
+ break;
+
+ case NETDATA_SIGNAL_EXIT_CLEANLY:
+ info("SIGNAL: Received %s. Cleaning up to exit...", name);
+ netdata_cleanup_and_exit(0);
+ exit(0);
+
+ case NETDATA_SIGNAL_FATAL:
+ fatal("SIGNAL: Received %s. netdata now exits.", name);
+
+ default:
+ info("SIGNAL: Received %s. No signal handler configured. Ignoring it.", name);
+ break;
+ }
+ }
+ }
+ }
+ }
+ else
+ error("SIGNAL: pause() returned but it was not interrupted by a signal.");
+ }
+}
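
The new signals.c keeps the handler async-signal-safe: it only increments a per-signal counter (plus a raw write() to stderr for fatal signals), and all real work runs later on the main thread once pause() returns. A stripped-down sketch of the same deferral pattern using only POSIX calls (the counter here is sig_atomic_t, the textbook-safe type; the code above uses size_t):

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>
    #include <errno.h>
    #include <stdio.h>

    static volatile sig_atomic_t hup_count = 0;

    static void handler(int signo) {
        (void)signo;
        hup_count++;                  // the only work that is safe in a handler
    }

    int main(void) {
        struct sigaction sa;
        memset(&sa, 0, sizeof(sa));
        sigfillset(&sa.sa_mask);      // block everything while the handler runs
        sa.sa_handler = handler;
        sigaction(SIGHUP, &sa, NULL);

        for(;;) {
            if(pause() == -1 && errno == EINTR) {
                while(hup_count) {    // drain signals that arrived while working
                    hup_count--;
                    puts("SIGHUP received: would reopen log files here");
                }
            }
        }
    }
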
diff --git a/src/signals.h b/src/signals.h
new file mode 100644
index 000000000..d8611edea
--- /dev/null
+++ b/src/signals.h
@@ -0,0 +1,10 @@
+#ifndef NETDATA_SIGNALS_H
+#define NETDATA_SIGNALS_H
+
+extern void signals_init(void);
+extern void signals_block(void);
+extern void signals_unblock(void);
+extern void signals_handle(void);
+extern void signals_reset(void);
+
+#endif //NETDATA_SIGNALS_H
diff --git a/src/socket.c b/src/socket.c
index 2b3821190..d28df81a6 100644
--- a/src/socket.c
+++ b/src/socket.c
@@ -46,9 +46,10 @@ int sock_setreuse(int fd, int reuse) {
int sock_setreuse_port(int fd, int reuse) {
int ret = -1;
+
#ifdef SO_REUSEPORT
ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
- if(ret == -1)
+ if(ret == -1 && errno != ENOPROTOOPT)
error("failed to set SO_REUSEPORT on socket %d", fd);
#endif
@@ -80,6 +81,50 @@ int sock_enlarge_out(int fd) {
// --------------------------------------------------------------------------------------------------------------------
// listening sockets
+int create_listen_socket_unix(const char *path, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path);
+
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(sock < 0) {
+ error("LISTENER: UNIX socket() on path '%s' failed.", path);
+ return -1;
+ }
+
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_un name;
+ memset(&name, 0, sizeof(struct sockaddr_un));
+ name.sun_family = AF_UNIX;
+ strncpy(name.sun_path, path, sizeof(name.sun_path)-1);
+
+ errno = 0;
+ if (unlink(path) == -1 && errno != ENOENT)
+ error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path);
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: UNIX bind() on path '%s' failed.", path);
+ return -1;
+ }
+
+ // we have to chmod this to 0777 so that the client will be able
+ // to read from and write to this socket.
+ if(chmod(path, 0777) == -1)
+ error("LISTENER: failed to chmod() socket file '%s'.", path);
+
+ if(listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: UNIX listen() on path '%s' failed.", path);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path);
+ return sock;
+}
+
int create_listen_socket4(int socktype, const char *ip, int port, int listen_backlog) {
int sock;
@@ -176,7 +221,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p
return sock;
}
-static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int socktype, const char *protocol, const char *ip, int port) {
+static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, int port) {
if(sockets->opened >= MAX_LISTEN_FDS) {
error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
close(fd);
@@ -184,11 +229,27 @@ static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int sockty
}
sockets->fds[sockets->opened] = fd;
+ sockets->fds_types[sockets->opened] = socktype;
+ sockets->fds_families[sockets->opened] = family;
char buffer[100 + 1];
- snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+
+ switch(family) {
+ case AF_INET:
+ snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port);
+ break;
+
+ case AF_INET6:
+ default:
+ snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+ break;
+
+ case AF_UNIX:
+ snprintfz(buffer, 100, "%s:%s", protocol, ip);
+ break;
+ }
+
sockets->fds_names[sockets->opened] = strdupz(buffer);
- sockets->fds_types[sockets->opened] = socktype;
sockets->opened++;
return 0;
@@ -230,7 +291,7 @@ void listen_sockets_close(LISTEN_SOCKETS *sockets) {
sockets->failed = 0;
}
-static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, int default_port, int listen_backlog) {
+static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, int default_port, int listen_backlog) {
int added = 0;
struct addrinfo hints;
struct addrinfo *result = NULL, *rp = NULL;
@@ -258,6 +319,22 @@ static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, i
socktype = SOCK_DGRAM;
protocol_str = "udp";
}
+ else if(strncmp(ip, "unix:", 5) == 0) {
+ char *path = ip + 5;
+ socktype = SOCK_STREAM;
+ protocol_str = "unix";
+
+ int fd = create_listen_socket_unix(path, listen_backlog);
+ if (fd == -1) {
+ error("LISTENER: Cannot create unix socket '%s'", path);
+ sockets->failed++;
+ }
+ else {
+ listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0);
+ added++;
+ }
+ return added;
+ }
char *e = ip;
if(*e == '[') {
@@ -314,11 +391,13 @@ static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, i
for (rp = result; rp != NULL; rp = rp->ai_next) {
int fd = -1;
+ int family = -1;
char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
int rport = default_port;
- switch (rp->ai_addr->sa_family) {
+ family = rp->ai_addr->sa_family;
+ switch (family) {
case AF_INET: {
struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
@@ -338,7 +417,7 @@ static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, i
}
default:
- debug(D_LISTENER, "LISTENER: Unknown socket family %d", rp->ai_addr->sa_family);
+ debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
break;
}
@@ -347,7 +426,7 @@ static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, i
sockets->failed++;
}
else {
- listen_sockets_add(sockets, fd, socktype, protocol_str, rip, rport);
+ listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport);
added++;
}
}
@@ -385,7 +464,7 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
char buf[e - s + 1];
strncpyz(buf, s, e - s);
- bind_to_one(sockets, buf, sockets->default_port, sockets->backlog);
+ bind_to_this(sockets, buf, sockets->default_port, sockets->backlog);
s = e;
}
@@ -403,7 +482,39 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
// --------------------------------------------------------------------------------------------------------------------
// connect to another host/port
-// _connect_to()
+// connect_to_unix()
+// path the path of the unix socket
+// timeout the timeout for establishing a connection
+
+static inline int connect_to_unix(const char *path, struct timeval *timeout) {
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(fd == -1) {
+ error("Failed to create UNIX socket() for '%s'", path);
+ return -1;
+ }
+
+ if(timeout) {
+ if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+ error("Failed to set timeout on UNIX socket '%s'", path);
+ }
+
+ struct sockaddr_un addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1);
+
+ if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
+ error("Cannot connect to UNIX socket on path '%s'.", path);
+ close(fd);
+ return -1;
+ }
+
+ debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path);
+
+ return fd;
+}
+
+// connect_to_this_ip46()
// protocol IPPROTO_TCP, IPPROTO_UDP
// socktype SOCK_STREAM, SOCK_DGRAM
// host the destination hostname or IP address (IPv4 or IPv6) to connect to
@@ -413,7 +524,7 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
// service the service name or port to connect to
// timeout the timeout for establishing a connection
-static inline int _connect_to(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
+static inline int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
struct addrinfo hints;
struct addrinfo *ai_head = NULL, *ai = NULL;
@@ -519,7 +630,7 @@ static inline int _connect_to(int protocol, int socktype, const char *host, uint
return fd;
}
-// connect_to()
+// connect_to_this()
//
// definition format:
//
@@ -530,7 +641,7 @@ static inline int _connect_to(int protocol, int socktype, const char *host, uint
// INTERFACE = for IPv6 only, the network interface to use
// PORT = port number or service name
-int connect_to(const char *definition, int default_port, struct timeval *timeout) {
+int connect_to_this(const char *definition, int default_port, struct timeval *timeout) {
char buffer[strlen(definition) + 1];
strcpy(buffer, definition);
@@ -551,6 +662,10 @@ int connect_to(const char *definition, int default_port, struct timeval *timeout
protocol = IPPROTO_UDP;
socktype = SOCK_DGRAM;
}
+ else if(strncmp(host, "unix:", 5) == 0) {
+ char *path = host + 5;
+ return connect_to_unix(path, timeout);
+ }
char *e = host;
if(*e == '[') {
@@ -595,7 +710,7 @@ int connect_to(const char *definition, int default_port, struct timeval *timeout
service = default_service;
- return _connect_to(protocol, socktype, host, scope_id, service, timeout);
+ return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout);
}
int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
@@ -617,7 +732,7 @@ int connect_to_one_of(const char *destination, int default_port, struct timeval
char buf[e - s + 1];
strncpyz(buf, s, e - s);
if(reconnects_counter) *reconnects_counter += 1;
- sock = connect_to(buf, default_port, timeout);
+ sock = connect_to_this(buf, default_port, timeout);
if(sock != -1) {
if(connected_to && connected_to_size) {
strncpy(connected_to, buf, connected_to_size);
@@ -747,7 +862,7 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien
socklen_t addrlen = sizeof(sadr);
int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags);
- if (nfd >= 0) {
+ if (likely(nfd >= 0)) {
if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
error("LISTENER: cannot getnameinfo() on received client connection.");
strncpyz(client_ip, "UNKNOWN", ipsize - 1);
@@ -775,6 +890,10 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien
break;
}
}
+#ifdef HAVE_ACCEPT4
+ else if(errno == ENOSYS)
+ error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ...");
+#endif
return nfd;
}
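
Listening and outgoing connections now both branch on a literal "unix:" prefix in the definition string, so a definition like unix:/tmp/netdata.sock (path illustrative) selects AF_UNIX. The client side of that support boils down to the following, mirroring connect_to_unix() above without the timeout option:

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <string.h>
    #include <unistd.h>

    static int connect_unix_example(const char *path) {
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if(fd == -1) return -1;

        struct sockaddr_un addr;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        // sun_path is short (typically 108 bytes), hence the truncating copy
        strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

        if(connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
            close(fd);
            return -1;
        }
        return fd;
    }
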
diff --git a/src/socket.h b/src/socket.h
index bb95347ab..8ca7288c9 100644
--- a/src/socket.h
+++ b/src/socket.h
@@ -15,13 +15,14 @@ typedef struct listen_sockets {
size_t failed; // the number of sockets attempted to open, but failed
int fds[MAX_LISTEN_FDS]; // the open sockets
char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets
- int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets
+ int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM)
+ int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6)
} LISTEN_SOCKETS;
extern int listen_sockets_setup(LISTEN_SOCKETS *sockets);
extern void listen_sockets_close(LISTEN_SOCKETS *sockets);
-extern int connect_to(const char *definition, int default_port, struct timeval *timeout);
+extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout);
extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size);
extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
diff --git a/src/statsd.c b/src/statsd.c
index 4dd04757b..08ce3e2f5 100644
--- a/src/statsd.c
+++ b/src/statsd.c
@@ -232,6 +232,7 @@ static struct statsd {
int update_every;
SIMPLE_PATTERN *charts_for;
+ size_t decimal_detail;
size_t private_charts;
size_t max_private_charts;
size_t max_private_charts_hard;
@@ -250,6 +251,7 @@ static struct statsd {
.max_private_charts = 200,
.max_private_charts_hard = 1000,
.recvmmsg_size = 10,
+ .decimal_detail = STATSD_DECIMAL_DETAIL,
.gauges = {
.name = "gauge",
@@ -443,7 +445,7 @@ static inline void statsd_process_counter(STATSD_METRIC *m, const char *value, c
if(unlikely(m->reset)) statsd_reset_metric(m);
- m->counter.value += roundl((long double)statsd_parse_int(value, 1) / statsd_parse_float(sampling, 1.0));
+ m->counter.value += llrintl((long double)statsd_parse_int(value, 1) / statsd_parse_float(sampling, 1.0));
m->events++;
m->count++;
@@ -1286,7 +1288,7 @@ static inline void statsd_private_chart_gauge(STATSD_METRIC *m) {
, RRDSET_TYPE_LINE
);
- m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1394,13 +1396,13 @@ static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, con
, RRDSET_TYPE_AREA
);
- m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->rd_value = rrddim_add(m->st, "average", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1429,7 +1431,7 @@ static inline void statsd_flush_gauge(STATSD_METRIC *m) {
int updated = 0;
if(m->count && !m->reset) {
- m->last = (collected_number) (m->gauge.value * STATSD_DECIMAL_DETAIL);
+ m->last = (collected_number) (m->gauge.value * statsd.decimal_detail);
m->reset = 1;
updated = 1;
@@ -1488,18 +1490,18 @@ static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char
long double *series = m->histogram.ext->values;
sort_series(series, len);
- m->histogram.ext->last_min = (collected_number)roundl(series[0] * STATSD_DECIMAL_DETAIL);
- m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * STATSD_DECIMAL_DETAIL);
- m->last = (collected_number)roundl(average(series, len) * STATSD_DECIMAL_DETAIL);
- m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * STATSD_DECIMAL_DETAIL);
- m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * STATSD_DECIMAL_DETAIL);
- m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_min = (collected_number)roundl(series[0] * statsd.decimal_detail);
+ m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * statsd.decimal_detail);
+ m->last = (collected_number)roundl(average(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * statsd.decimal_detail);
size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0);
if(pct_len < 1)
- m->histogram.ext->last_percentile = (collected_number)(series[0] * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail);
else
- m->histogram.ext->last_percentile = (collected_number)roundl(average(series, pct_len) * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_percentile = (collected_number)roundl(series[pct_len - 1] * statsd.decimal_detail);
debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT,
dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum);
@@ -1574,7 +1576,7 @@ static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC
}
else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
dim->algorithm = RRD_ALGORITHM_ABSOLUTE;
- dim->divisor *= STATSD_DECIMAL_DETAIL;
+ dim->divisor *= statsd.decimal_detail;
switch(dim->value_type) {
case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS:
@@ -1619,7 +1621,7 @@ static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC
dim->algorithm = statsd_algorithm_for_metric(m);
if(m->type == STATSD_METRIC_TYPE_GAUGE)
- dim->divisor *= STATSD_DECIMAL_DETAIL;
+ dim->divisor *= statsd.decimal_detail;
}
if(unlikely(chart->st && dim->rd)) {
@@ -1776,6 +1778,7 @@ void *statsd_main(void *ptr) {
statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5);
statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries);
+ statsd.decimal_detail = (size_t)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
@@ -1922,13 +1925,13 @@ void *statsd_main(void *ptr) {
, "statsd"
, NULL
, "Bytes read by the netdata statsd server"
- , "kbps"
+ , "kilobits/s"
, 132003
, statsd.update_every
, RRDSET_TYPE_STACKED
);
- RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
- RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
RRDSET *st_packets = rrdset_create_localhost(
"netdata"
diff --git a/src/storage_number.c b/src/storage_number.c
index 27fe5f2c7..054941202 100644
--- a/src/storage_number.c
+++ b/src/storage_number.c
@@ -2,12 +2,13 @@
extern char *print_number_lu_r(char *str, unsigned long uvalue);
extern char *print_number_llu_r(char *str, unsigned long long uvalue);
+extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue);
storage_number pack_storage_number(calculated_number value, uint32_t flags)
{
// bit 32 = sign 0:positive, 1:negative
// bit 31 = 0:divide, 1:multiply
- // bit 30, 29, 28 = (multiplier or divider) 0-6 (7 total)
+ // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total)
// bit 27, 26, 25 flags
// bit 24 to bit 1 = the value
@@ -105,6 +106,7 @@ calculated_number unpack_storage_number(storage_number value)
return n;
}
+/*
int print_calculated_number(char *str, calculated_number value)
{
char *wstr = str;
@@ -113,21 +115,14 @@ int print_calculated_number(char *str, calculated_number value)
if(sign) value = -value;
#ifdef STORAGE_WITH_MATH
- // without llrint() there are rounding problems
+ // without llrintl() there are rounding problems
// for example 0.9 becomes 0.89
- unsigned long long uvalue = (unsigned long long int) llrint(value * (calculated_number)100000);
+ unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000);
#else
unsigned long long uvalue = value * (calculated_number)100000;
#endif
-#ifdef ENVIRONMENT32
- if(uvalue > (unsigned long long)0xffffffff)
- wstr = print_number_llu_r(str, uvalue);
- else
- wstr = print_number_lu_r(str, uvalue);
-#else
- do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
-#endif
+ wstr = print_number_llu_r_smart(str, uvalue);
// make sure we have 6 bytes at least
while((wstr - str) < 6) *wstr++ = '0';
@@ -166,3 +161,62 @@ int print_calculated_number(char *str, calculated_number value)
// return the buffer length
return (int) ((wstr - str) + 2 + decimal );
}
+*/
+
+int print_calculated_number(char *str, calculated_number value) {
+ char integral_str[50], fractional_str[50];
+
+ char *wstr = str;
+
+ if(unlikely(value < 0)) {
+ *wstr++ = '-';
+ value = -value;
+ }
+
+ calculated_number integral, fractional;
+
+#ifdef STORAGE_WITH_MATH
+ fractional = modfl(value, &integral) * 10000000.0;
+#else
+ fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL);
+#endif
+
+ char *istre;
+ if(integral == 0.0) {
+ integral_str[0] = '0';
+ istre = &integral_str[1];
+ }
+ else
+ // convert the integral part to string (reversed)
+ istre = print_number_llu_r_smart(integral_str, (unsigned long long)integral);
+
+ // copy reversed the integral string
+ istre--;
+ while( istre >= integral_str ) *wstr++ = *istre--;
+
+ if(fractional != 0.0) {
+ // add a dot
+ *wstr++ = '.';
+
+ // convert the fractional part to string (reversed)
+ char *fstre = print_number_llu_r_smart(fractional_str, (unsigned long long)calculated_number_llrint(fractional));
+
+ // prepend zeros to reach 7 digits length
+ int decimal = 7;
+ int len = (int)(fstre - fractional_str);
+ while(len < decimal) {
+ *wstr++ = '0';
+ len++;
+ }
+
+ char *begin = fractional_str;
+ while(begin < fstre && *begin == '0') begin++;
+
+ // copy reversed the fractional string
+ fstre--;
+ while( fstre >= begin ) *wstr++ = *fstre--;
+ }
+
+ *wstr = '\0';
+ return (int)(wstr - str);
+}
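
The rewritten print_calculated_number() splits the value with modfl() and renders the integral part and a 7-digit fractional part separately, instead of printing one scaled 64-bit integer. The split itself is easy to verify in isolation (print_number_llu_r_smart() is netdata-internal, so plain printf stands in for it here):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        long double value = 123.0000456L;

        long double integral;
        long double fractional = modfl(value, &integral) * 10000000.0L;

        // integral part, a dot, then the fractional part zero-padded to 7 digits
        printf("%.0Lf.%07lld\n", integral, llrintl(fractional));  // prints 123.0000456
        return 0;
    }
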
diff --git a/src/storage_number.h b/src/storage_number.h
index 34ed0d89c..3c1b6bab3 100644
--- a/src/storage_number.h
+++ b/src/storage_number.h
@@ -14,6 +14,10 @@ typedef long double collected_number;
#define COLLECTED_NUMBER_FORMAT "%0.7Lf"
*/
+#define calculated_number_llrint(x) llrintl(x)
+#define calculated_number_round(x) roundl(x)
+#define calculated_number_fabs(x) fabsl(x)
+
typedef uint32_t storage_number;
#define STORAGE_NUMBER_FORMAT "%u"
@@ -28,7 +32,7 @@ typedef uint32_t storage_number;
#define SN_FLAGS_MASK (~(0x6 << 24))
// extract the flags
-#define get_storage_number_flags(value) ((((storage_number)value) & (1 << 24)) | (((storage_number)value) & (2 << 24)) | (((storage_number)value) & (4 << 24)))
+#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (2 << 24)) | (((storage_number)(value)) & (4 << 24)))
#define SN_EMPTY_SLOT 0x00000000
// checks
@@ -40,13 +44,13 @@ calculated_number unpack_storage_number(storage_number value);
int print_calculated_number(char *str, calculated_number value);
-#define STORAGE_NUMBER_POSITIVE_MAX 167772150000000.0
-#define STORAGE_NUMBER_POSITIVE_MIN 0.00001
-#define STORAGE_NUMBER_NEGATIVE_MAX -0.00001
-#define STORAGE_NUMBER_NEGATIVE_MIN -167772150000000.0
+#define STORAGE_NUMBER_POSITIVE_MAX (167772150000000.0)
+#define STORAGE_NUMBER_POSITIVE_MIN (0.0000001)
+#define STORAGE_NUMBER_NEGATIVE_MAX (-0.0000001)
+#define STORAGE_NUMBER_NEGATIVE_MIN (-167772150000000.0)
// accepted accuracy loss
#define ACCURACY_LOSS 0.0001
-#define accuracy_loss(t1, t2) ((t1 == t2 || t1 == 0.0 || t2 == 0.0) ? 0.0 : (100.0 - ((t1 > t2) ? (t2 * 100.0 / t1 ) : (t1 * 100.0 / t2))))
+#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2)))))
#endif /* NETDATA_STORAGE_NUMBER_H */
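
The added parentheses around macro arguments in get_storage_number_flags() and accuracy_loss() matter as soon as a caller passes a compound expression, because operator precedence otherwise rebinds the expansion. A short illustration of the failure mode these parentheses close off:

    #include <stdio.h>

    #define BAD_SCALE(t1, t2)  (t1 * 100.0 / t2)        // old style: arguments bare
    #define GOOD_SCALE(t1, t2) ((t1) * 100.0 / (t2))    // new style: arguments wrapped

    int main(void) {
        // BAD_SCALE(1 + 1, 4) expands to (1 + 1 * 100.0 / 4) == 26, not 50
        printf("bad=%g good=%g\n", BAD_SCALE(1 + 1, 4), GOOD_SCALE(1 + 1, 4));
        return 0;
    }
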
diff --git a/src/sys_devices_system_edac_mc.c b/src/sys_devices_system_edac_mc.c
index c41ad7faa..7ec989434 100644
--- a/src/sys_devices_system_edac_mc.c
+++ b/src/sys_devices_system_edac_mc.c
@@ -27,7 +27,7 @@ static void find_all_mc() {
char *dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name);
DIR *dir = opendir(dirname);
- if(!dir) {
+ if(unlikely(!dir)) {
error("Cannot read ECC memory errors directory '%s'", dirname);
return;
}
@@ -132,21 +132,30 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) {
static RRDSET *ce_st = NULL;
if(unlikely(!ce_st)) {
- ce_st = rrdset_find_localhost("mem.ecc_ce");
- if(unlikely(!ce_st))
- ce_st = rrdset_create_localhost("mem", "ecc_ce", NULL, "ecc", NULL, "ECC Memory Correctable Errors"
- , "errors", 6600, update_every, RRDSET_TYPE_LINE);
-
- for(m = mc_root; m; m = m->next)
- if(m->ce_count_filename)
- m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ ce_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ce"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Correctable Errors"
+ , "errors"
+ , 6600
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
}
else
rrdset_next(ce_st);
- for(m = mc_root; m; m = m->next)
- if(m->ce_count_filename && m->ce_updated)
+ for(m = mc_root; m; m = m->next) {
+ if (m->ce_count_filename && m->ce_updated) {
+ if(unlikely(!m->ce_rd))
+ m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
rrddim_set_by_pointer(ce_st, m->ce_rd, m->ce_count);
+ }
+ }
rrdset_done(ce_st);
}
@@ -159,22 +168,30 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) {
static RRDSET *ue_st = NULL;
if(unlikely(!ue_st)) {
- ue_st = rrdset_find_localhost("mem.ecc_ue");
-
- if(unlikely(!ue_st))
- ue_st = rrdset_create_localhost("mem", "ecc_ue", NULL, "ecc", NULL, "ECC Memory Uncorrectable Errors"
- , "errors", 6610, update_every, RRDSET_TYPE_LINE);
-
- for(m = mc_root; m; m = m->next)
- if(m->ue_count_filename)
- m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ ue_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ue"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Uncorrectable Errors"
+ , "errors"
+ , 6610
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
}
else
rrdset_next(ue_st);
- for(m = mc_root; m; m = m->next)
- if(m->ue_count_filename && m->ue_updated)
+ for(m = mc_root; m; m = m->next) {
+ if (m->ue_count_filename && m->ue_updated) {
+ if(unlikely(!m->ue_rd))
+ m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
rrddim_set_by_pointer(ue_st, m->ue_rd, m->ue_count);
+ }
+ }
rrdset_done(ue_st);
}
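
Both EDAC charts now follow the same pattern: the chart is created once, and each dimension is added lazily, the first time its counter actually reports data, instead of being pre-created for every file found at startup. A sketch of that pattern with stand-in types (dim, counter, dim_add, dim_set are hypothetical, not the RRDSET/RRDDIM API):

    #include <stdio.h>
    #include <stdlib.h>

    struct dim { const char *name; };
    struct counter {
        const char *name;
        int updated;
        long long value;
        struct dim *rd;                /* created lazily, on first use */
        struct counter *next;
    };

    static struct dim *dim_add(const char *name) {
        struct dim *d = malloc(sizeof(*d));
        d->name = name;
        printf("created dimension '%s'\n", name);
        return d;
    }

    static void dim_set(struct dim *d, long long v) {
        printf("%s = %lld\n", d->name, v);
    }

    static void collect(struct counter *root) {
        for (struct counter *c = root; c; c = c->next) {
            if (c->updated) {
                if (!c->rd)                      /* first sighting: create it */
                    c->rd = dim_add(c->name);
                dim_set(c->rd, c->value);        /* then just update it */
            }
        }
    }

    int main(void) {
        struct counter b = { "mc1", 0, 0, NULL, NULL };
        struct counter a = { "mc0", 1, 7, NULL, &b };
        collect(&a);                   /* only mc0 gets a dimension */
        b.updated = 1; b.value = 3;
        collect(&a);                   /* mc1 appears once it reports data */
        return 0;
    }
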
diff --git a/src/sys_fs_cgroup.c b/src/sys_fs_cgroup.c
index 0f9c8854a..c047547e7 100644
--- a/src/sys_fs_cgroup.c
+++ b/src/sys_fs_cgroup.c
@@ -41,7 +41,7 @@ static char *cgroup_memory_base = NULL;
static char *cgroup_devices_base = NULL;
static int cgroup_root_count = 0;
-static int cgroup_root_max = 500;
+static int cgroup_root_max = 1000;
static int cgroup_max_depth = 0;
static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL;
@@ -50,6 +50,7 @@ static SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
static SIMPLE_PATTERN *systemd_services_cgroups = NULL;
static char *cgroups_rename_script = NULL;
+static char *cgroups_network_interface_script = NULL;
static int cgroups_check = 0;
@@ -103,7 +104,7 @@ void read_cgroup_plugin_configuration() {
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
if(!mi) {
- error("Cannot find cgroup cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
+ error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
s = "/sys/fs/cgroup/cpuacct";
}
else s = mi->mount_point;
@@ -113,7 +114,7 @@ void read_cgroup_plugin_configuration() {
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
if(!mi) {
- error("Cannot find cgroup blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
+ error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
s = "/sys/fs/cgroup/blkio";
}
else s = mi->mount_point;
@@ -123,7 +124,7 @@ void read_cgroup_plugin_configuration() {
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
if(!mi) {
- error("Cannot find cgroup memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
+ error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
s = "/sys/fs/cgroup/memory";
}
else s = mi->mount_point;
@@ -133,7 +134,7 @@ void read_cgroup_plugin_configuration() {
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
if(!mi) {
- error("Cannot find cgroup devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
+ error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
s = "/sys/fs/cgroup/devices";
}
else s = mi->mount_point;
@@ -166,7 +167,7 @@ void read_cgroup_plugin_configuration() {
" !/docker "
" !/libvirt "
" !/lxc "
- " !/lxc/*/ns " // #1397
+ " !/lxc/*/* " // #1397 #2649
" !/machine "
" !/qemu "
" !/system "
@@ -184,13 +185,16 @@ void read_cgroup_plugin_configuration() {
" !/systemd "
" !/user "
" !/user.slice "
- " !/lxc/*/ns/* " // #2161
+ " !/lxc/*/* " // #2161 #2649
" * "
), SIMPLE_PATTERN_EXACT);
snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_plugins_dir);
cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename);
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_plugins_dir);
+ cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename);
+
enabled_cgroup_renames = simple_pattern_create(
config_get("plugin:cgroups", "run script to rename cgroups matching",
" *.scope "
@@ -329,6 +333,12 @@ struct cpuacct_usage {
unsigned long long *cpu_percpu;
};
+struct cgroup_network_interface {
+ const char *host_device;
+ const char *container_device;
+ struct cgroup_network_interface *next;
+};
+
#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001
#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
@@ -360,6 +370,8 @@ struct cgroup {
struct blkio io_merged; // operations
struct blkio io_queued; // operations
+ struct cgroup_network_interface *interfaces;
+
// per cgroup charts
RRDSET *st_cpu;
RRDSET *st_cpu_per_core;
@@ -433,7 +445,7 @@ static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) {
unsigned long i, lines = procfile_lines(ff);
if(unlikely(lines < 1)) {
- error("File '%s' should have 1+ lines.", cp->filename);
+ error("CGROUP: file '%s' should have 1+ lines.", cp->filename);
cp->updated = 0;
return;
}
@@ -475,7 +487,7 @@ static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) {
}
if(unlikely(procfile_lines(ff) < 1)) {
- error("File '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));
+ error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));
ca->updated = 0;
return;
}
@@ -539,7 +551,7 @@ static inline void cgroup_read_blkio(struct blkio *io) {
unsigned long i, lines = procfile_lines(ff);
if(unlikely(lines < 1)) {
- error("File '%s' should have 1+ lines.", io->filename);
+ error("CGROUP: file '%s' should have 1+ lines.", io->filename);
io->updated = 0;
return;
}
@@ -612,7 +624,7 @@ static inline void cgroup_read_memory(struct memory *mem) {
unsigned long i, lines = procfile_lines(ff);
if(unlikely(lines < 1)) {
- error("File '%s' should have 1+ lines.", mem->filename_detailed);
+ error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed);
mem->updated_detailed = 0;
goto memory_next;
}
@@ -718,6 +730,77 @@ static inline void read_all_cgroups(struct cgroup *root) {
}
// ----------------------------------------------------------------------------
+// cgroup network interfaces
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
+ debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+
+ pid_t cgroup_pid;
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+
+ snprintfz(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_cpuacct_base, cg->id);
+
+ debug(D_CGROUP, "executing command '%s' for cgroup '%s'", buffer, cg->id);
+ FILE *fp = mypopen(buffer, &cgroup_pid);
+ if(fp) {
+ char *s;
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s) {
+ error("CGROUP: empty host interface returned by script");
+ continue;
+ }
+
+ if(!*t) {
+ error("CGROUP: empty container interface returned by script");
+ continue;
+ }
+
+ struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
+ i->host_device = strdupz(s);
+ i->container_device = strdupz(t);
+ i->next = cg->interfaces;
+ cg->interfaces = i;
+
+ info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
+
+ // register a device rename to proc_net_dev.c
+ netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+ }
+ else
+ error("CGROUP: cannot popen(\"%s\", \"r\").", buffer);
+}
+
+static inline void free_cgroup_network_interfaces(struct cgroup *cg) {
+ while(cg->interfaces) {
+ struct cgroup_network_interface *i = cg->interfaces;
+ cg->interfaces = i->next;
+
+ // delete the registration of proc_net_dev rename
+ netdev_rename_device_del(i->host_device);
+
+ freez((void *)i->host_device);
+ freez((void *)i->container_device);
+ freez((void *)i);
+ }
+}
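
read_cgroup_network_interfaces() expects the helper script to print one "host_device container_device" pair per line over a pipe, and pushes each pair onto a singly linked list. A sketch of the same line format using plain POSIX popen() and strtok_r() instead of netdata's mypopen()/trim() (the printf command here stands in for cgroup-network):

    #include <stdio.h>
    #include <string.h>

    static void read_pairs(const char *cmd) {
        FILE *fp = popen(cmd, "r");
        if (!fp) { perror("popen"); return; }

        char line[2048];
        while (fgets(line, sizeof(line), fp)) {
            char *save = NULL;
            char *host = strtok_r(line, " \t\r\n", &save);
            char *cont = strtok_r(NULL, " \t\r\n", &save);
            if (!host || !*host) { fprintf(stderr, "empty host interface\n"); continue; }
            if (!cont || !*cont) { fprintf(stderr, "empty container interface\n"); continue; }
            printf("host '%s' maps to container '%s'\n", host, cont);
        }
        pclose(fp);
    }

    int main(void) {
        read_pairs("printf 'veth0 eth0\\nveth1 eth1\\n'");
        return 0;
    }
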
+
+// ----------------------------------------------------------------------------
// add/remove/find cgroup objects
#define CGROUP_CHARTID_LINE_MAX 1024
@@ -775,7 +858,7 @@ static inline void cgroup_get_chart_name(struct cgroup *cg) {
}
}
else
- error("CGROUP: Cannot popen(\"%s\", \"r\").", buffer);
+ error("CGROUP: cannot popen(\"%s\", \"r\").", buffer);
}
static inline struct cgroup *cgroup_add(const char *id) {
@@ -783,7 +866,7 @@ static inline struct cgroup *cgroup_add(const char *id) {
debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
if(cgroup_root_count >= cgroup_root_max) {
- info("Maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);
+ info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);
return NULL;
}
@@ -872,7 +955,7 @@ static inline struct cgroup *cgroup_add(const char *id) {
for (t = cgroup_root; t; t = t->next) {
if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) {
- error("Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
cg->chart_id, t->id, cg->id, t->id);
debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
cg->chart_id, t->id, cg->id, t->id);
@@ -880,7 +963,7 @@ static inline struct cgroup *cgroup_add(const char *id) {
t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
}
else {
- error("Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
cg->chart_id, t->id, cg->id);
debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
cg->chart_id, t->id, cg->id);
@@ -893,6 +976,9 @@ static inline struct cgroup *cgroup_add(const char *id) {
}
}
+ if(cg->enabled && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))
+ read_cgroup_network_interfaces(cg);
+
debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled");
return cg;
@@ -916,6 +1002,8 @@ static inline void cgroup_free(struct cgroup *cg) {
if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
+ free_cgroup_network_interfaces(cg);
+
freez(cg->cpuacct_usage.cpu_percpu);
freez(cg->cpuacct_stat.filename);
@@ -979,7 +1067,7 @@ static inline void found_subdir_in_dir(const char *dir) {
depth++;
if(depth > cgroup_max_depth) {
- info("cgroup '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
+ info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
return;
}
}
@@ -1004,7 +1092,7 @@ static inline int find_dir_in_subdirs(const char *base, const char *this, void (
DIR *dir = opendir(this);
if(!dir) {
- error("Cannot read cgroups directory '%s'", base);
+ error("CGROUP: cannot read directory '%s'", base);
return ret;
}
ret = 1;
@@ -1110,7 +1198,7 @@ static inline void find_all_cgroups() {
if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
cgroup_enable_cpuacct_stat =
cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
- error("disabled CGROUP cpu statistics.");
+ error("CGROUP: disabled cpu statistics.");
}
}
@@ -1122,7 +1210,7 @@ static inline void find_all_cgroups() {
cgroup_enable_blkio_throttle_ops =
cgroup_enable_blkio_merged_ops =
cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
- error("disabled CGROUP blkio statistics.");
+ error("CGROUP: disabled blkio statistics.");
}
}
@@ -1132,14 +1220,14 @@ static inline void find_all_cgroups() {
cgroup_enable_detailed_memory =
cgroup_enable_swap =
cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
- error("disabled CGROUP memory statistics.");
+ error("CGROUP: disabled memory statistics.");
}
}
if(cgroup_search_in_devices) {
if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
cgroup_search_in_devices = 0;
- error("disabled CGROUP devices statistics.");
+ error("CGROUP: disabled devices statistics.");
}
}
@@ -2508,13 +2596,13 @@ void update_cgroup_charts(int update_every) {
void *cgroups_main(void *ptr) {
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- info("CGROUP Plugin thread created with task id %d", gettid());
+ info("CGROUP plugin thread created with task id %d", gettid());
if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
- error("Cannot set pthread cancel type to DEFERRED.");
+ error("CGROUP: cannot set pthread cancel type to DEFERRED.");
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("Cannot set pthread cancel state to ENABLE.");
+ error("CGROUP: cannot set pthread cancel state to ENABLE.");
struct rusage thread;
diff --git a/src/sys_kernel_mm_ksm.c b/src/sys_kernel_mm_ksm.c
index 76d808538..6b04ef280 100644
--- a/src/sys_kernel_mm_ksm.c
+++ b/src/sys_kernel_mm_ksm.c
@@ -5,18 +5,18 @@ typedef struct ksm_name_value {
unsigned long long value;
} KSM_NAME_VALUE;
-#define PAGES_SHARED 0
-#define PAGES_SHARING 1
+#define PAGES_SHARED 0
+#define PAGES_SHARING 1
#define PAGES_UNSHARED 2
#define PAGES_VOLATILE 3
-#define PAGES_TO_SCAN 4
+#define PAGES_TO_SCAN 4
KSM_NAME_VALUE values[] = {
- [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL },
- [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL },
+ [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL },
+ [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL },
[PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 0ULL },
[PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL },
- [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL },
+ [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL },
};
int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
@@ -24,117 +24,168 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL, *ff_pages_to_scan = NULL;
static long page_size = -1;
- if(page_size == -1)
+ if(unlikely(page_size == -1))
page_size = sysconf(_SC_PAGESIZE);
- if(!ff_pages_shared) {
+ if(unlikely(!ff_pages_shared)) {
snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared");
snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename));
ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
}
- if(!ff_pages_sharing) {
+ if(unlikely(!ff_pages_sharing)) {
snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing");
snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename));
ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT);
}
- if(!ff_pages_unshared) {
+ if(unlikely(!ff_pages_unshared)) {
snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared");
snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename));
ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
}
- if(!ff_pages_volatile) {
+ if(unlikely(!ff_pages_volatile)) {
snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile");
snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename));
ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT);
}
- if(!ff_pages_to_scan) {
+ if(unlikely(!ff_pages_to_scan)) {
snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan");
snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename));
ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT);
}
- if(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile || !ff_pages_to_scan) return 1;
+ if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile || !ff_pages_to_scan))
+ return 1;
unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, pages_to_scan = 0, offered = 0, saved = 0;
ff_pages_shared = procfile_readall(ff_pages_shared);
- if(!ff_pages_shared) return 0; // we return 0, so that we will retry to open it next time
+ if(unlikely(!ff_pages_shared)) return 0; // we return 0, so that we will retry to open it next time
pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0));
ff_pages_sharing = procfile_readall(ff_pages_sharing);
- if(!ff_pages_sharing) return 0; // we return 0, so that we will retry to open it next time
+ if(unlikely(!ff_pages_sharing)) return 0; // we return 0, so that we will retry to open it next time
pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0));
ff_pages_unshared = procfile_readall(ff_pages_unshared);
- if(!ff_pages_unshared) return 0; // we return 0, so that we will retry to open it next time
+ if(unlikely(!ff_pages_unshared)) return 0; // we return 0, so that we will retry to open it next time
pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0));
ff_pages_volatile = procfile_readall(ff_pages_volatile);
- if(!ff_pages_volatile) return 0; // we return 0, so that we will retry to open it next time
+ if(unlikely(!ff_pages_volatile)) return 0; // we return 0, so that we will retry to open it next time
pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0));
ff_pages_to_scan = procfile_readall(ff_pages_to_scan);
- if(!ff_pages_to_scan) return 0; // we return 0, so that we will retry to open it next time
+ if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time
pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0));
offered = pages_sharing + pages_shared + pages_unshared + pages_volatile;
saved = pages_sharing - pages_shared;
- if(!offered || !pages_to_scan) return 0;
-
- RRDSET *st;
+ if(unlikely(!offered || !pages_to_scan)) return 0;
// --------------------------------------------------------------------
- st = rrdset_find_localhost("mem.ksm");
- if(!st) {
- st = rrdset_create_localhost("mem", "ksm", NULL, "ksm", NULL, "Kernel Same Page Merging", "MB", 5000
- , update_every, RRDSET_TYPE_AREA);
-
- rrddim_add(st, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ {
+ static RRDSET *st_mem_ksm = NULL;
+ static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL, *rd_to_scan = NULL;
+
+ if (unlikely(!st_mem_ksm)) {
+ st_mem_ksm = rrdset_create_localhost(
+ "mem"
+ , "ksm"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging"
+ , "MB"
+ , 5000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm);
+
+ rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size);
+
+ rrdset_done(st_mem_ksm);
}
- else rrdset_next(st);
-
- rrddim_set(st, "shared", pages_shared * page_size);
- rrddim_set(st, "unshared", pages_unshared * page_size);
- rrddim_set(st, "sharing", pages_sharing * page_size);
- rrddim_set(st, "volatile", pages_volatile * page_size);
- rrddim_set(st, "to_scan", pages_to_scan * page_size);
- rrdset_done(st);
-
- st = rrdset_find_localhost("mem.ksm_savings");
- if(!st) {
- st = rrdset_create_localhost("mem", "ksm_savings", NULL, "ksm", NULL, "Kernel Same Page Merging Savings", "MB"
- , 5001, update_every, RRDSET_TYPE_AREA);
-
- rrddim_add(st, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
- else rrdset_next(st);
-
- rrddim_set(st, "savings", saved * page_size);
- rrddim_set(st, "offered", offered * page_size);
- rrdset_done(st);
- st = rrdset_find_localhost("mem.ksm_ratios");
- if(!st) {
- st = rrdset_create_localhost("mem", "ksm_ratios", NULL, "ksm", NULL, "Kernel Same Page Merging Effectiveness"
- , "percentage", 5002, update_every, RRDSET_TYPE_LINE);
+ // --------------------------------------------------------------------
- rrddim_add(st, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+ {
+ static RRDSET *st_mem_ksm_savings = NULL;
+ static RRDDIM *rd_savings = NULL, *rd_offered = NULL;
+
+ if (unlikely(!st_mem_ksm_savings)) {
+ st_mem_ksm_savings = rrdset_create_localhost(
+ "mem"
+ , "ksm_savings"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Savings"
+ , "MB"
+ , 5001
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_savings);
+
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size);
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size);
+
+ rrdset_done(st_mem_ksm_savings);
}
- else rrdset_next(st);
- rrddim_set(st, "savings", (saved * 1000000) / offered);
- rrdset_done(st);
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm_ratios = NULL;
+ static RRDDIM *rd_savings = NULL;
+
+ if (unlikely(!st_mem_ksm_ratios)) {
+ st_mem_ksm_ratios = rrdset_create_localhost(
+ "mem"
+ , "ksm_ratios"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Effectiveness"
+ , "percentage"
+ , 5002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_ratios);
+
+ rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered);
+
+ rrdset_done(st_mem_ksm_ratios);
+ }
return 0;
}
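
The ksm_ratios chart stores its percentage as a fixed-point integer: the collected value is saved * 1000000 / offered, and the dimension's divisor is 10000, so the charted number works out to saved / offered * 100. A quick numeric check of that arithmetic:

    #include <stdio.h>

    int main(void) {
        unsigned long long saved = 1234, offered = 100000;

        unsigned long long stored = (saved * 1000000ULL) / offered;  /* 12340 */
        double charted = (double)stored / 10000.0;                   /* divisor 10000 */

        printf("charted = %.4f%%\n", charted);                       /* 1.2340% */
        printf("direct  = %.4f%%\n", 100.0 * (double)saved / (double)offered);
        return 0;
    }
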
diff --git a/src/unit_test.c b/src/unit_test.c
index 9b008138f..3c9632119 100644
--- a/src/unit_test.c
+++ b/src/unit_test.c
@@ -31,8 +31,8 @@ int check_storage_number(calculated_number n, int debug) {
CALCULATED_NUMBER_FORMAT " re-parsed from printed (diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n\n",
n,
d, s, ddiff, dcdiff,
- buffer,
- len, p, pdiff, pcdiff
+ buffer, len,
+ p, pdiff, pcdiff
);
if(len != strlen(buffer)) fprintf(stderr, "ERROR: printed number %s is reported to have length %zu but it has %zu\n", buffer, len, strlen(buffer));
if(dcdiff > ACCURACY_LOSS) fprintf(stderr, "WARNING: packing number " CALCULATED_NUMBER_FORMAT " has accuracy loss %0.7Lf %%\n", n, dcdiff);
@@ -45,6 +45,19 @@ int check_storage_number(calculated_number n, int debug) {
return 0;
}
+calculated_number storage_number_min(calculated_number n) {
+ calculated_number r = 1, last;
+
+ do {
+ last = n;
+ n /= 2.0;
+ storage_number t = pack_storage_number(n, SN_EXISTS);
+ r = unpack_storage_number(t);
+ } while(r != 0.0 && r != last);
+
+ return last;
+}
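
Instead of trusting the compile-time STORAGE_NUMBER_POSITIVE_MIN constant, the test now probes for the real limit: it keeps halving a value until a pack/unpack round trip collapses to zero, and reports the last survivor. The same probe works against any lossy codec; here is a generic sketch where a hypothetical 3-decimal quantize() stands in for pack_storage_number()/unpack_storage_number():

    #include <stdio.h>

    static double quantize(double v) {           /* stand-in lossy codec */
        return (double)(long long)(v * 1000.0) / 1000.0;
    }

    static double smallest_representable(double n) {
        double last, r;
        do {
            last = n;
            n /= 2.0;
            r = quantize(n);
        } while (r != 0.0 && r != last);         /* stop when it collapses or stops changing */
        return last;
    }

    int main(void) {
        /* prints ~0.00195: the smallest input this codec keeps non-zero */
        printf("min = %g\n", smallest_representable(1.0));
        return 0;
    }
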
+
void benchmark_storage_number(int loop, int multiplier) {
int i, j;
calculated_number n, d;
@@ -73,10 +86,10 @@ void benchmark_storage_number(int loop, int multiplier) {
}
fprintf(stderr, "\nNETDATA FLOATING POINT\n");
- fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_POSITIVE_MIN);
+ fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", storage_number_min(1));
fprintf(stderr, "MAX POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_POSITIVE_MAX);
fprintf(stderr, "MIN NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_NEGATIVE_MIN);
- fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", (calculated_number)STORAGE_NUMBER_NEGATIVE_MAX);
+ fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", -storage_number_min(1));
fprintf(stderr, "Maximum accuracy loss: " CALCULATED_NUMBER_FORMAT "%%\n\n\n", (calculated_number)ACCURACY_LOSS);
// ------------------------------------------------------------------------
@@ -231,7 +244,7 @@ int unit_test_storage()
int unit_test_str2ld() {
char *values[] = {
- "1.234567", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10",
+ "1.2345678", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10",
"hello", "1wrong", "nan", "inf", NULL
};
@@ -269,6 +282,36 @@ int unit_test_str2ld() {
return 0;
}
+int unit_test_buffer() {
+ BUFFER *wb = buffer_create(1);
+ char string[2048 + 1];
+ char final[9000 + 1];
+ int i;
+
+ for(i = 0; i < 2048; i++)
+ string[i] = (char)((i % 24) + 'a');
+ string[2048] = '\0';
+
+ const char *fmt = "string1: %s\nstring2: %s\nstring3: %s\nstring4: %s";
+ buffer_sprintf(wb, fmt, string, string, string, string);
+ snprintfz(final, 9000, fmt, string, string, string, string);
+
+ const char *s = buffer_tostring(wb);
+
+ if(buffer_strlen(wb) != strlen(final) || strcmp(s, final) != 0) {
+ fprintf(stderr, "\nbuffer_sprintf() is faulty.\n");
+ fprintf(stderr, "\nstring : %s (length %zu)\n", string, strlen(string));
+ fprintf(stderr, "\nbuffer : %s (length %zu)\n", s, buffer_strlen(wb));
+ fprintf(stderr, "\nexpected: %s (length %zu)\n", final, strlen(final));
+ buffer_free(wb);
+ return -1;
+ }
+
+ fprintf(stderr, "buffer_sprintf() works as expected.\n");
+ buffer_free(wb);
+ return 0;
+}
+
// --------------------------------------------------------------------------------------------------------------------
struct feed_values {
@@ -994,7 +1037,7 @@ int run_test(struct test *test)
for(c = 0 ; c < max ; c++) {
calculated_number v = unpack_storage_number(rd->values[c]);
calculated_number n = test->results[c];
- int same = (roundl(v * 10000000.0) == roundl(n * 10000000.0))?1:0;
+ int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
test->name, rd->name, c+1,
(rrdset_first_entry_t(st) + c * st->update_every) - time_start,
@@ -1005,7 +1048,7 @@ int run_test(struct test *test)
if(rd2) {
v = unpack_storage_number(rd2->values[c]);
n = test->results2[c];
- same = (roundl(v * 10000000.0) == roundl(n * 10000000.0))?1:0;
+ same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
test->name, rd2->name, c+1,
(rrdset_first_entry_t(st) + c * st->update_every) - time_start,
@@ -1019,8 +1062,7 @@ int run_test(struct test *test)
static int test_variable_renames(void) {
fprintf(stderr, "Creating chart\n");
- RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", 1, 1
- , RRDSET_TYPE_LINE);
+ RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", 1, 1, RRDSET_TYPE_LINE);
fprintf(stderr, "Created chart with id '%s', name '%s'\n", st->id, st->name);
fprintf(stderr, "Creating dimension DIM1\n");
diff --git a/src/unit_test.h b/src/unit_test.h
index 3240b5f0e..68ed61fcb 100644
--- a/src/unit_test.h
+++ b/src/unit_test.h
@@ -5,5 +5,6 @@ extern int unit_test_storage(void);
extern int unit_test(long delay, long shift);
extern int run_all_mockup_tests(void);
extern int unit_test_str2ld(void);
+extern int unit_test_buffer(void);
#endif /* NETDATA_UNIT_TEST_H */
diff --git a/src/web_api_v1.c b/src/web_api_v1.c
index 3ffd8c324..9514f8dbd 100644
--- a/src/web_api_v1.c
+++ b/src/web_api_v1.c
@@ -816,7 +816,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
#endif /* NETDATA_INTERNAL_CHECKS */
}
- if(respect_web_browser_do_not_track_policy && w->donottrack) {
+ if(respect_web_browser_do_not_track_policy && web_client_has_donottrack(w)) {
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "Your web browser is sending 'DNT: 1' (Do Not Track). The registry requires persistent cookies on your browser to work.");
return 400;
@@ -853,19 +853,19 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
switch(action) {
case 'A':
- w->tracking_required = 1;
+ web_client_enable_tracking_required(w);
return registry_request_access_json(host, w, person_guid, machine_guid, machine_url, url_name, now_realtime_sec());
case 'D':
- w->tracking_required = 1;
+ web_client_enable_tracking_required(w);
return registry_request_delete_json(host, w, person_guid, machine_guid, machine_url, delete_url, now_realtime_sec());
case 'S':
- w->tracking_required = 1;
+ web_client_enable_tracking_required(w);
return registry_request_search_json(host, w, person_guid, machine_guid, machine_url, search_machine_guid, now_realtime_sec());
case 'W':
- w->tracking_required = 1;
+ web_client_enable_tracking_required(w);
return registry_request_switch_json(host, w, person_guid, machine_guid, machine_url, to_person_guid, now_realtime_sec());
case 'H':
diff --git a/src/web_buffer.c b/src/web_buffer.c
index 9f9ceda63..f5452452f 100644
--- a/src/web_buffer.c
+++ b/src/web_buffer.c
@@ -84,6 +84,19 @@ inline char *print_number_llu_r(char *str, unsigned long long uvalue) {
return wstr;
}
+inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) {
+#ifdef ENVIRONMENT32
+ if(uvalue > (unsigned long long)0xffffffff)
+ str = print_number_llu_r(str, uvalue);
+ else
+ str = print_number_lu_r(str, uvalue);
+#else
+ do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+ return str;
+}
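
print_number_llu_r_smart() exists because on 32-bit targets a 64-bit divide/modulo compiles to a runtime-library call (e.g. __udivdi3), so values that fit in 32 bits are routed to a cheaper unsigned-long loop. A self-contained sketch of the same dispatch, keying off ULONG_MAX rather than the build system's ENVIRONMENT32 macro:

    #include <limits.h>
    #include <stdio.h>

    static char *digits_reversed_ul(char *p, unsigned long v) {
        do { *p++ = (char)('0' + (v % 10)); } while (v /= 10);
        return p;
    }

    static char *digits_reversed_ull(char *p, unsigned long long v) {
        do { *p++ = (char)('0' + (v % 10)); } while (v /= 10);
        return p;
    }

    static char *digits_reversed_smart(char *p, unsigned long long v) {
    #if ULONG_MAX == 0xffffffffUL
        /* 32-bit long: pick the loop per value */
        if (v > 0xffffffffULL) return digits_reversed_ull(p, v);
        return digits_reversed_ul(p, (unsigned long)v);
    #else
        /* 64-bit long: one loop is fine */
        return digits_reversed_ull(p, v);
    #endif
    }

    int main(void) {
        char buf[32], *e = digits_reversed_smart(buf, 1234567890123ULL);
        while (e > buf) putchar(*--e);           /* un-reverse while printing */
        putchar('\n');
        return 0;
    }
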
+
void buffer_print_llu(BUFFER *wb, unsigned long long uvalue)
{
buffer_need_bytes(wb, 50);
@@ -201,29 +214,25 @@ void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
{
if(unlikely(!fmt || !*fmt)) return;
- buffer_need_bytes(wb, 2);
-
- size_t len = wb->size - wb->len - 1;
- size_t wrote;
-
va_list args;
- va_start(args, fmt);
- wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
- va_end(args);
+ size_t wrote = 0, need = 2, multiplier = 0, len;
+
+ do {
+ need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP;
+ multiplier++;
- if(unlikely(wrote >= len)) {
- // truncated
- buffer_overflow_check(wb);
+ debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
+ buffer_need_bytes(wb, need);
- debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size);
- buffer_need_bytes(wb, len + WEB_DATA_LENGTH_INCREASE_STEP);
+ len = wb->size - wb->len - 1;
va_start(args, fmt);
- buffer_vsprintf(wb, fmt, args);
+ wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
va_end(args);
- }
- else
- wb->len += wrote;
+
+ } while(wrote >= len);
+
+ wb->len += wrote;
// the buffer is \0 terminated by vsnprintf
}
diff --git a/src/web_buffer.h b/src/web_buffer.h
index 8f0d29cd2..177abc0a8 100644
--- a/src/web_buffer.h
+++ b/src/web_buffer.h
@@ -71,6 +71,7 @@ extern void buffer_char_replace(BUFFER *wb, char from, char to);
extern char *print_number_lu_r(char *str, unsigned long uvalue);
extern char *print_number_llu_r(char *str, unsigned long long uvalue);
+extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue);
extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue);
diff --git a/src/web_buffer_svg.c b/src/web_buffer_svg.c
index 2591799d4..287bbd6b8 100644
--- a/src/web_buffer_svg.c
+++ b/src/web_buffer_svg.c
@@ -389,10 +389,13 @@ static inline char *format_value_with_precision_and_unit(char *value_string, siz
len = snprintfz(value_string, value_string_len, "%0.0Lf", (long double) value);
trim_zeros = 0;
}
- else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1Lf", (long double) value);
- else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2Lf", (long double) value);
- else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2Lf", (long double) value);
- else len = snprintfz(value_string, value_string_len, "%0.4Lf", (long double) value);
+ else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1Lf", (long double) value);
+ else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2Lf", (long double) value);
+ else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2Lf", (long double) value);
+ else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4Lf", (long double) value);
+ else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5Lf", (long double) value);
+ else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6Lf", (long double) value);
+ else len = snprintfz(value_string, value_string_len, "%0.7Lf", (long double) value);
if(unlikely(trim_zeros)) {
int l;
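
The badge formatter picks its printf precision from the value's magnitude, so large values stay short while small values keep enough decimals to remain visible; the hunk above extends the ladder down to 7 decimals, matching the new 0.0000001 storage minimum. A condensed sketch of the tiering (fewer rungs than the original):

    #include <math.h>
    #include <stdio.h>

    static void format_tiered(char *out, size_t len, double value) {
        double a = fabs(value);
        if      (a >= 1000)  snprintf(out, len, "%.0f", value);
        else if (a >= 10)    snprintf(out, len, "%.1f", value);
        else if (a >= 0.1)   snprintf(out, len, "%.2f", value);
        else if (a >= 0.01)  snprintf(out, len, "%.4f", value);
        else if (a >= 0.001) snprintf(out, len, "%.5f", value);
        else                 snprintf(out, len, "%.7f", value);
    }

    int main(void) {
        char b[64];
        double samples[] = { 12345.6, 42.42, 0.5, 0.0123, 0.000042 };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            format_tiered(b, sizeof(b), samples[i]);
            printf("%-12g -> %s\n", samples[i], b);
        }
        return 0;
    }
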
diff --git a/src/web_client.c b/src/web_client.c
index 7da080705..6ec3e11e3 100644
--- a/src/web_client.c
+++ b/src/web_client.c
@@ -17,10 +17,11 @@ unsigned long long web_clients_count = 0;
static inline int web_client_crock_socket(struct web_client *w) {
#ifdef TCP_CORK
- if(likely(!w->tcp_cork && w->ofd != -1)) {
+ if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) {
w->tcp_cork = 1;
if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) {
error("%llu: failed to enable TCP_CORK on socket.", w->id);
+
w->tcp_cork = 0;
return -1;
}
@@ -78,7 +79,7 @@ struct web_client *web_client_create(int listener) {
w->response.header = buffer_create(HTTP_RESPONSE_HEADER_SIZE);
w->response.header_output = buffer_create(HTTP_RESPONSE_HEADER_SIZE);
w->origin[0] = '*';
- w->wait_receive = 1;
+ web_client_enable_wait_receive(w);
if(web_clients) web_clients->prev = w;
w->next = web_clients;
@@ -150,9 +151,9 @@ void web_client_reset(struct web_client *w) {
w->mode = WEB_CLIENT_MODE_NORMAL;
w->tcp_cork = 0;
- w->donottrack = 0;
- w->tracking_required = 0;
- w->keepalive = 0;
+ web_client_disable_donottrack(w);
+ web_client_disable_tracking_required(w);
+ web_client_disable_keepalive(w);
w->decoded_url[0] = '\0';
buffer_reset(w->response.header_output);
@@ -162,8 +163,8 @@ void web_client_reset(struct web_client *w) {
w->response.sent = 0;
w->response.code = 0;
- w->wait_receive = 1;
- w->wait_send = 0;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
w->response.zoutput = 0;
@@ -210,14 +211,18 @@ uid_t web_files_uid(void) {
static uid_t owner_uid = 0;
if(unlikely(!web_owner)) {
- web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", config_get(CONFIG_SECTION_GLOBAL, "run as user", ""));
+ // getpwuid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct passwd *pw = getpwuid(geteuid());
+ web_owner = config_get(CONFIG_SECTION_WEB, "web files owner", (pw)?(pw->pw_name?pw->pw_name:""):"");
if(!web_owner || !*web_owner)
owner_uid = geteuid();
else {
// getpwnam() is not thread safe,
// but we have called this function once
// while single threaded
- struct passwd *pw = getpwnam(web_owner);
+ pw = getpwnam(web_owner);
if(!pw) {
error("User '%s' is not present. Ignoring option.", web_owner);
owner_uid = geteuid();
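
The new default for "web files owner" is derived from the process itself: getpwuid(geteuid()) maps the effective uid back to a user name, falling back to the empty string when there is no passwd entry. A sketch of that derivation (the numeric-uid fallback here is an illustrative extra, not netdata's behaviour):

    #include <pwd.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        uid_t uid = geteuid();
        struct passwd *pw = getpwuid(uid);   /* not thread-safe: call once, early */

        char owner[64];
        if (pw && pw->pw_name && *pw->pw_name)
            snprintf(owner, sizeof(owner), "%s", pw->pw_name);
        else
            snprintf(owner, sizeof(owner), "%u", (unsigned)uid);

        printf("default web files owner: %s\n", owner);
        return 0;
    }
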
@@ -237,14 +242,18 @@ gid_t web_files_gid(void) {
static gid_t owner_gid = 0;
if(unlikely(!web_group)) {
- web_group = config_get(CONFIG_SECTION_WEB, "web files group", config_get(CONFIG_SECTION_WEB, "web files owner", ""));
+ // getgrgid() is not thread safe,
+ // but we have called this function once
+ // while single threaded
+ struct group *gr = getgrgid(getegid());
+ web_group = config_get(CONFIG_SECTION_WEB, "web files group", (gr)?(gr->gr_name?gr->gr_name:""):"");
if(!web_group || !*web_group)
owner_gid = getegid();
else {
// getgrnam() is not thread safe,
// but we have called this function once
// while single threaded
- struct group *gr = getgrnam(web_group);
+ gr = getgrnam(web_group);
if(!gr) {
error("Group '%s' is not present. Ignoring option.", web_group);
owner_gid = getegid();
@@ -383,8 +392,8 @@ int mysendfile(struct web_client *w, char *filename) {
debug(D_WEB_CLIENT_ACCESS, "%llu: Sending file '%s' (%ld bytes, ifd %d, ofd %d).", w->id, webfilename, stat.st_size, w->ifd, w->ofd);
w->mode = WEB_CLIENT_MODE_FILECOPY;
- w->wait_receive = 1;
- w->wait_send = 0;
+ web_client_enable_wait_receive(w);
+ web_client_disable_wait_send(w);
buffer_flush(w->response.data);
w->response.rlen = stat.st_size;
#ifdef __APPLE__
@@ -723,11 +732,11 @@ static inline char *http_header_parse(struct web_client *w, char *s) {
else if(hash == hash_connection && !strcasecmp(s, "Connection")) {
if(strcasestr(v, "keep-alive"))
- w->keepalive = 1;
+ web_client_enable_keepalive(w);
}
else if(respect_web_browser_do_not_track_policy && hash == hash_donottrack && !strcasecmp(s, "DNT")) {
- if(*v == '0') w->donottrack = 0;
- else if(*v == '1') w->donottrack = 1;
+ if(*v == '0') web_client_disable_donottrack(w);
+ else if(*v == '1') web_client_enable_donottrack(w);
}
#ifdef NETDATA_WITH_ZLIB
else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) {
@@ -776,7 +785,7 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
w->mode = WEB_CLIENT_MODE_STREAM;
}
else {
- w->wait_receive = 0;
+ web_client_disable_wait_receive(w);
return HTTP_VALIDATION_NOT_SUPPORTED;
}
@@ -792,7 +801,7 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
// incomplete requests
if(unlikely(!*s)) {
- w->wait_receive = 1;
+ web_client_enable_wait_receive(w);
return HTTP_VALIDATION_INCOMPLETE;
}
@@ -823,7 +832,7 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
// FIXME -- we should avoid it
strncpyz(w->last_url, w->decoded_url, URL_MAX);
- w->wait_receive = 0;
+ web_client_disable_wait_receive(w);
return HTTP_VALIDATION_OK;
}
@@ -833,7 +842,7 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
}
// incomplete request
- w->wait_receive = 1;
+ web_client_enable_wait_receive(w);
return HTTP_VALIDATION_INCOMPLETE;
}
@@ -876,7 +885,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
"Content-Type: %s\r\n"
"Date: %s\r\n"
, w->response.code, code_msg
- , w->keepalive?"keep-alive":"close"
+ , web_client_has_keepalive(w)?"keep-alive":"close"
, w->origin
, content_type_string
, date
@@ -904,7 +913,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
}
else {
if(respect_web_browser_do_not_track_policy) {
- if(w->tracking_required)
+ if(web_client_has_tracking_required(w))
buffer_sprintf(w->response.header_output,
"Tk: T;cookies\r\n");
else
@@ -946,7 +955,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
}
else {
// we don't know the content length, disable keep-alive
- w->keepalive = 0;
+ web_client_disable_keepalive(w);
}
}
@@ -1234,8 +1243,8 @@ void web_client_process_request(struct web_client *w) {
web_client_send_http_header(w);
// enable sending immediately if we have data
- if(w->response.data->len) w->wait_send = 1;
- else w->wait_send = 0;
+ if(w->response.data->len) web_client_enable_wait_send(w);
+ else web_client_disable_wait_send(w);
switch(w->mode) {
case WEB_CLIENT_MODE_STREAM:
@@ -1253,7 +1262,7 @@ void web_client_process_request(struct web_client *w) {
case WEB_CLIENT_MODE_FILECOPY:
if(w->response.rlen) {
debug(D_WEB_CLIENT, "%llu: Done preparing the response. Will be sending data file of %zu bytes to client.", w->id, w->response.rlen);
- w->wait_receive = 1;
+ web_client_enable_wait_receive(w);
/*
// utilize the kernel sendfile() for copying the file to the socket.
@@ -1368,14 +1377,14 @@ ssize_t web_client_send_deflate(struct web_client *w)
if(t < 0) return t;
}
- if(w->mode == WEB_CLIENT_MODE_FILECOPY && w->wait_receive && w->response.rlen && w->response.rlen > w->response.data->len) {
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
// we have to wait, more data will come
debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
- w->wait_send = 0;
+ web_client_disable_wait_send(w);
return t;
}
- if(unlikely(!w->keepalive)) {
+ if(unlikely(!web_client_has_keepalive(w))) {
debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
WEB_CLIENT_IS_DEAD(w);
return t;
@@ -1411,7 +1420,7 @@ ssize_t web_client_send_deflate(struct web_client *w)
// ask for FINISH if we have all the input
int flush = Z_SYNC_FLUSH;
if(w->mode == WEB_CLIENT_MODE_NORMAL
- || (w->mode == WEB_CLIENT_MODE_FILECOPY && !w->wait_receive && w->response.data->len == w->response.rlen)) {
+ || (w->mode == WEB_CLIENT_MODE_FILECOPY && !web_client_has_wait_receive(w) && w->response.data->len == w->response.rlen)) {
flush = Z_FINISH;
debug(D_DEFLATE, "%llu: Requesting Z_FINISH, if possible.", w->id);
}
@@ -1480,14 +1489,14 @@ ssize_t web_client_send(struct web_client *w) {
// A. we have done everything
// B. we temporarily have nothing to send, waiting for the buffer to be filled by ifd
- if(w->mode == WEB_CLIENT_MODE_FILECOPY && w->wait_receive && w->response.rlen && w->response.rlen > w->response.data->len) {
+ if(w->mode == WEB_CLIENT_MODE_FILECOPY && web_client_has_wait_receive(w) && w->response.rlen && w->response.rlen > w->response.data->len) {
// we have to wait, more data will come
debug(D_WEB_CLIENT, "%llu: Waiting for more data to become available.", w->id);
- w->wait_send = 0;
+ web_client_disable_wait_send(w);
return 0;
}
- if(unlikely(!w->keepalive)) {
+ if(unlikely(!web_client_has_keepalive(w))) {
debug(D_WEB_CLIENT, "%llu: Closing (keep-alive is not enabled). %zu bytes sent.", w->id, w->response.sent);
WEB_CLIENT_IS_DEAD(w);
return 0;
@@ -1541,10 +1550,10 @@ ssize_t web_client_receive(struct web_client *w)
debug(D_WEB_DATA, "%llu: Received data: '%s'.", w->id, &w->response.data->buffer[old]);
if(w->mode == WEB_CLIENT_MODE_FILECOPY) {
- w->wait_send = 1;
+ web_client_enable_wait_send(w);
if(w->response.rlen && w->response.data->len >= w->response.rlen)
- w->wait_receive = 0;
+ web_client_disable_wait_receive(w);
}
}
else if(likely(bytes == 0)) {
@@ -1557,7 +1566,7 @@ ssize_t web_client_receive(struct web_client *w)
if(w->mode == WEB_CLIENT_MODE_FILECOPY) {
// we are copying data from ifd to ofd
// let it finish copying...
- w->wait_receive = 0;
+ web_client_disable_wait_receive(w);
debug(D_WEB_CLIENT, "%llu: Read the whole file.", w->id);
if(w->ifd != w->ofd) close(w->ifd);
@@ -1603,11 +1612,11 @@ void *web_client_main(void *ptr)
for(;;) {
if(unlikely(netdata_exit)) break;
- if(unlikely(w->dead)) {
+ if(unlikely(web_client_check_dead(w))) {
debug(D_WEB_CLIENT, "%llu: client is dead.", w->id);
break;
}
- else if(unlikely(!w->wait_receive && !w->wait_send)) {
+ else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) {
debug(D_WEB_CLIENT, "%llu: client is not set for neither receiving nor sending data.", w->id);
break;
}
@@ -1622,8 +1631,8 @@ void *web_client_main(void *ptr)
fds[0].events = 0;
fds[0].revents = 0;
- if(w->wait_receive) fds[0].events |= POLLIN;
- if(w->wait_send) fds[0].events |= POLLOUT;
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
+ if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT;
fds[1].fd = -1;
fds[1].events = 0;
@@ -1637,19 +1646,19 @@ void *web_client_main(void *ptr)
fds[0].fd = w->ifd;
fds[0].events = 0;
fds[0].revents = 0;
- if(w->wait_receive) fds[0].events |= POLLIN;
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
ifd = &fds[0];
fds[1].fd = w->ofd;
fds[1].events = 0;
fds[1].revents = 0;
- if(w->wait_send) fds[1].events |= POLLOUT;
+ if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT;
ofd = &fds[1];
fdmax = 2;
}
- debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, w->wait_receive?"INPUT":"", w->wait_send?"OUTPUT":"");
+ debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
errno = 0;
timeout = web_client_timeout * 1000;
retval = poll(fds, fdmax, timeout);
@@ -1666,14 +1675,14 @@ void *web_client_main(void *ptr)
break;
}
else if(unlikely(!retval)) {
- debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, w->wait_receive?"INPUT":"", w->wait_send?"OUTPUT":"");
+ debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
break;
}
if(unlikely(netdata_exit)) break;
int used = 0;
- if(w->wait_send && ofd->revents & POLLOUT) {
+ if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) {
used++;
if(web_client_send(w) < 0) {
debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
@@ -1683,7 +1692,7 @@ void *web_client_main(void *ptr)
if(unlikely(netdata_exit)) break;
- if(w->wait_receive && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) {
+ if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) {
used++;
if(web_client_receive(w) < 0) {
debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. Closing client.", w->id);
@@ -1724,7 +1733,7 @@ void *web_client_main(void *ptr)
w->ifd = -1;
w->ofd = -1;
- w->obsolete = 1;
+ WEB_CLIENT_IS_OBSOLETE(w);
pthread_exit(NULL);
return NULL;
diff --git a/src/web_client.h b/src/web_client.h
index 617917df0..126a494f0 100644
--- a/src/web_client.h
+++ b/src/web_client.h
@@ -20,6 +20,66 @@ typedef enum web_client_mode {
WEB_CLIENT_MODE_STREAM = 3
} WEB_CLIENT_MODE;
+typedef enum web_client_flags {
+ WEB_CLIENT_FLAG_OBSOLETE = 1 << 0, // if set, the listener will remove this client
+ // after setting this, you should not touch
+ // this web_client
+
+ WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting for more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
+
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
+
+ WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
+ WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8 // if set, the client is using a UNIX socket
+} WEB_CLIENT_FLAGS;
+
+//#ifdef HAVE_C___ATOMIC
+//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
+//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
+//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
+//#else
+#define web_client_flag_check(w, flag) ((w)->flags & flag)
+#define web_client_flag_set(w, flag) (w)->flags |= flag
+#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
+//#endif
+
+#define WEB_CLIENT_IS_OBSOLETE(w) web_client_flag_set(w, WEB_CLIENT_FLAG_OBSOLETE)
+#define web_client_check_obsolete(w) web_client_flag_check(w, WEB_CLIENT_FLAG_OBSOLETE)
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+
+#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
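
Seven one-bit bitfields collapse into a single flags word, with all access funneled through web_client_flag_check/set/clear; the commented-out __atomic variants show how the same macros could later become thread-safe without touching any call site. A compact model of the pattern (illustrative names, not the netdata flags):

    #include <stdio.h>

    typedef enum {
        FLAG_KEEPALIVE    = 1 << 0,
        FLAG_WAIT_RECEIVE = 1 << 1,
        FLAG_WAIT_SEND    = 1 << 2
    } client_flags_t;

    struct client { client_flags_t flags; };

    #define flag_check(c, f) ((c)->flags & (f))
    #define flag_set(c, f)   ((c)->flags |= (f))
    #define flag_clear(c, f) ((c)->flags &= ~(f))

    int main(void) {
        struct client c = { 0 };

        flag_set(&c, FLAG_WAIT_RECEIVE);     /* was: w->wait_receive = 1 */
        flag_set(&c, FLAG_KEEPALIVE);

        if (flag_check(&c, FLAG_WAIT_RECEIVE))
            printf("polling for input\n");

        flag_clear(&c, FLAG_WAIT_RECEIVE);   /* was: w->wait_receive = 0 */
        printf("flags now: 0x%x\n", (unsigned)c.flags);   /* 0x1, keepalive only */
        return 0;
    }
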
#define URL_MAX 8192
#define ZLIB_CHUNK 16384
#define HTTP_RESPONSE_HEADER_SIZE 4096
@@ -50,20 +110,7 @@ struct response {
struct web_client {
unsigned long long id;
- uint8_t obsolete:1; // if set to 1, the listener will remove this client
- // after setting this to 1, you should not touch
- // this web_client
-
- uint8_t dead:1; // if set to 1, this client is dead
-
- uint8_t keepalive:1; // if set to 1, the web client will be re-used
-
- uint8_t wait_receive:1; // 1 = we are waiting more input data
- uint8_t wait_send:1; // 1 = we have data to send to the client
-
- uint8_t donottrack:1; // 1 = we should not set cookies on this client
- uint8_t tracking_required:1; // 1 = if the request requires cookies
-
+ WEB_CLIENT_FLAGS flags; // status flags for the client
WEB_CLIENT_MODE mode; // the operational mode of the client
int tcp_cork; // 1 = we have a cork on the socket
@@ -94,8 +141,6 @@ struct web_client {
struct web_client *next;
};
-#define WEB_CLIENT_IS_DEAD(w) (w)->dead=1
-
extern struct web_client *web_clients;
extern uid_t web_files_uid(void);
diff --git a/src/web_server.c b/src/web_server.c
index 491cd11aa..72168d15b 100644
--- a/src/web_server.c
+++ b/src/web_server.c
@@ -89,7 +89,7 @@ static inline void cleanup_web_clients(void) {
struct web_client *w;
for (w = web_clients; w;) {
- if (w->obsolete) {
+ if (web_client_check_obsolete(w)) {
debug(D_WEB_CLIENT, "%llu: Removing client.", w->id);
// pthread_cancel(w->thread);
// pthread_join(w->thread, NULL);
@@ -170,11 +170,11 @@ void *socket_listen_main_multi_threaded(void *ptr) {
if(pthread_create(&w->thread, NULL, web_client_main, w) != 0) {
error("%llu: failed to create new thread for web client.", w->id);
- w->obsolete = 1;
+ WEB_CLIENT_IS_OBSOLETE(w);
}
else if(pthread_detach(w->thread) != 0) {
error("%llu: Cannot request detach of newly created web client thread.", w->id);
- w->obsolete = 1;
+ WEB_CLIENT_IS_OBSOLETE(w);
}
}
}
@@ -200,7 +200,7 @@ void *socket_listen_main_multi_threaded(void *ptr) {
struct web_client *single_threaded_clients[FD_SETSIZE];
static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) {
- if(unlikely(w->obsolete || w->dead || (!w->wait_receive && !w->wait_send)))
+ if(unlikely(web_client_check_obsolete(w) || web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
return 1;
if(unlikely(w->ifd < 0 || w->ifd >= FD_SETSIZE || w->ofd < 0 || w->ofd >= FD_SETSIZE)) {
@@ -216,8 +216,8 @@ static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds
FD_SET(w->ofd, efds);
}
- if(w->wait_receive) FD_SET(w->ifd, ifds);
- if(w->wait_send) FD_SET(w->ofd, ofds);
+ if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds);
single_threaded_clients[w->ifd] = w;
single_threaded_clients[w->ofd] = w;
@@ -229,13 +229,13 @@ static inline int single_threaded_unlink_client(struct web_client *w, fd_set *if
FD_CLR(w->ifd, efds);
if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds);
- if(w->wait_receive) FD_CLR(w->ifd, ifds);
- if(w->wait_send) FD_CLR(w->ofd, ofds);
+ if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds);
single_threaded_clients[w->ifd] = NULL;
single_threaded_clients[w->ofd] = NULL;
- if(unlikely(w->obsolete || w->dead || (!w->wait_receive && !w->wait_send)))
+ if(unlikely(web_client_check_obsolete(w) || web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
return 1;
return 0;
@@ -302,6 +302,12 @@ void *socket_listen_main_single_threaded(void *ptr) {
if (FD_ISSET(api_sockets.fds[i], &rifds)) {
debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection.");
w = web_client_create(api_sockets.fds[i]);
+
+ if(api_sockets.fds_families[i] == AF_UNIX)
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
if (single_threaded_link_client(w, &ifds, &ofds, &ifds, &fdmax) != 0) {
web_client_free(w);
}
@@ -326,7 +332,7 @@ void *socket_listen_main_single_threaded(void *ptr) {
continue;
}
- if (unlikely(w->wait_receive && FD_ISSET(w->ifd, &rifds))) {
+ if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) {
if (unlikely(web_client_receive(w) < 0)) {
web_client_free(w);
continue;
@@ -338,7 +344,7 @@ void *socket_listen_main_single_threaded(void *ptr) {
}
}
- if (unlikely(w->wait_send && FD_ISSET(w->ofd, &rofds))) {
+ if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) {
if (unlikely(web_client_send(w) < 0)) {
debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
web_client_free(w);
diff --git a/system/netdata-init-d.in b/system/netdata-init-d.in
index 468ceee2a..90a4b95be 100644
--- a/system/netdata-init-d.in
+++ b/system/netdata-init-d.in
@@ -15,7 +15,7 @@ DAEMON="netdata"
DAEMON_PATH=@sbindir_POST@
PIDFILE=@localstatedir_POST@/run/$DAEMON.pid
DAEMONOPTS="-P $PIDFILE"
-STOP_TIMEOUT="10"
+STOP_TIMEOUT="60"
[ -e /etc/sysconfig/$DAEMON ] && . /etc/sysconfig/$DAEMON
diff --git a/system/netdata-openrc.in b/system/netdata-openrc.in
index 55808364c..465e1232c 100644
--- a/system/netdata-openrc.in
+++ b/system/netdata-openrc.in
@@ -10,7 +10,7 @@
# The timeout in seconds to wait for netdata
# to save its database on disk and exit.
-: ${NETDATA_WAIT_EXIT_TIMEOUT:=15}
+: ${NETDATA_WAIT_EXIT_TIMEOUT:=60}
# When set to 1, if netdata does not exit in
# NETDATA_WAIT_EXIT_TIMEOUT, we will force it
diff --git a/system/netdata.service.in b/system/netdata.service.in
index 6bbb84eb5..1d4af988a 100644
--- a/system/netdata.service.in
+++ b/system/netdata.service.in
@@ -1,15 +1,41 @@
[Unit]
Description=Real time performance monitoring
-After=network.target httpd.service squid.service nfs-server.service mysqld.service mysql.service named.service postfix.service
+After=network.target httpd.service squid.service nfs-server.service mysqld.service mysql.service named.service postfix.service chronyd.service
[Service]
Type=simple
User=netdata
Group=netdata
-ExecStart=@sbindir_POST@/netdata -D
+RuntimeDirectory=netdata
+RuntimeDirectoryMode=0775
+ExecStart=@sbindir_POST@/netdata -P /run/netdata/netdata.pid -D
# saving a big db on slow disks may need some time
TimeoutStopSec=60
+# restart netdata if it crashes
+Restart=on-failure
+RestartSec=30
+
+# The minimum netdata Out-Of-Memory (OOM) score.
+# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
+# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
+#OOMScoreAdjust=0
+
+# By default netdata switches to the 'idle' scheduling policy, which makes it use the CPU
+# only when there is spare capacity available.
+# Valid policies: other (the system default) | batch | idle | fifo | rr
+#CPUSchedulingPolicy=other
+
+# This sets the maximum scheduling priority netdata can set (for policies: rr and fifo).
+# netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
+# Priority gets values 1 (lowest) to 99 (highest).
+#CPUSchedulingPriority=1
+
+# For the scheduling policies 'other' and 'batch', this sets the lowest niceness of netdata.
+# netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
+#Nice=0
+
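+# To apply changes after editing this unit, use the standard systemd workflow:
+#   systemctl daemon-reload
+#   systemctl restart netdata
+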
[Install]
WantedBy=multi-user.target
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 000000000..fe07653f1
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,18 @@
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
diff --git a/tests/Makefile.in b/tests/Makefile.in
new file mode 100644
index 000000000..ff5bd2f28
--- /dev/null
+++ b/tests/Makefile.in
@@ -0,0 +1,472 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = tests
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
+ $(top_srcdir)/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
+ $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tests/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu tests/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 000000000..4fc9b303b
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,135 @@
+This readme is a manual on how to get started with unit testing of the JavaScript and Node.js code in netdata.
+
+Original author: BrainDoctor (github), July 2017
+
+# Installation
+
+Tested on Linux Mint 18.2 Sara (an Ubuntu/Debian derivative).
+
+Run the following as your regular development user so that file permissions stay correct; only the `apt-get` command needs sudo.
+
+```sh
+sudo apt-get install nodejs npm chromium-browser
+
+cd /path/to/your/netdata
+npm install
+```
+
+That should install the necessary node modules.
+
+Other browsers (Chrome, Firefox) work too. However, only Chromium 59 has been tested for headless unit testing.
+
+## Versions
+
+The commands above leave me with the following versions (July 2017):
+
+ - nodejs: v4.2.6
+ - npm: 3.5.2
+ - chromium-browser: 59.0.3071.109
+ - WebStorm (optional): 2017.1.4
+
+# Configuration
+
+## NPM
+
+The dependencies are declared in `netdata/package.json`. If you install a new NPM module, it gets added there. Future developers just need to execute `npm install` and every dependency gets installed automatically.
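+
+For example, adding a new dev dependency (the module name below is purely illustrative) records it in `package.json` automatically:
+
+```sh
+cd /path/to/your/netdata
+npm install --save-dev some-test-helper
+```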
+
+## Karma
+
+Karma configuration is in `tests/web/karma.conf.js`. Documentation is provided via comments.
+
+## WebStorm
+
+If you use the JetBrains WebStorm IDE, you can integrate the karma runtime.
+
+### for Karma (Client side testing)
+
+Headless Chromium:
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Headless Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: ChromiumHeadless
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+GUI Chromium is similar:
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: Chromium
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
+
+You may add other browsers too (comma-separated). With the "Browsers to start" field you can override any browser settings in karma.conf.js; the CLI equivalent is shown below.
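+
+For example, the same override works from the command line via Karma's standard `--browsers` flag (assuming the matching launcher plugins are installed):
+
+```sh
+nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --browsers=Chromium,Firefox
+```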
+
+It is also recommended to install the JetBrains IDE extension/addon in Chrome/Chromium for awesome debugging.
+
+### for node.d plugins (nodejs)
+
+1. Run > Edit Configurations
+2. "+" > Node.js
+3. - Name: Node.d plugins
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
+ - Application parameters: --captureExceptions tests/node.d
+
+# Running
+
+## In WebStorm
+
+### Karma
+Just launch the run configurations above; they produce nice test trees:
+
+![karma_run_2](https://user-images.githubusercontent.com/12159026/28277789-559149f6-6b1b-11e7-9cc7-a81d81d12c35.png)
+
+### node.js
+
+Debugging is awesome too!
+![node_debug](https://user-images.githubusercontent.com/12159026/28277879-8beee5ee-6b1b-11e7-9356-3156956f2282.png)
+
+## From CLI
+
+### Karma
+
+```sh
+cd /path/to/your/netdata
+
+nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --single-run=true --browsers=ChromiumHeadless
+```
+will start the Karma server, run the tests in headless Chromium, and exit.
+
+If a test fails, it even produces a stack trace:
+![karma_run_1](https://user-images.githubusercontent.com/12159026/28277754-3682bebe-6b1b-11e7-8b7e-66b23d87177d.png)
+
+### Node.d plugins
+
+```sh
+cd /path/to/your/netdata
+
+nodejs node_modules/jasmine-node/bin/jasmine-node --captureExceptions tests/node.d
+```
+
+will run the tests in `tests/node.d` and also produce a stack trace on error:
+![node_run](https://user-images.githubusercontent.com/12159026/28277812-65bb69b0-6b1b-11e7-8500-bcdbb3436574.png)
+
+## Coverage
+
+### Karma
+
+Karma produces a nice HTML coverage report that shows which code paths were executed. It is located somewhere in `/path/to/your/netdata/coverage/`
+
+![coverage_2](https://user-images.githubusercontent.com/12159026/28277719-142146c4-6b1b-11e7-9992-3e88dee2efd2.png)
+and
+![coverage_1](https://user-images.githubusercontent.com/12159026/28277687-fa93e360-6b1a-11e7-995f-cbb4c5d012a7.png)
+
+### Node.d
+
+jasmine-node can apparently produce a JUnit report with the `--junitreport` flag, but that output was not very useful. Maybe it's configurable?
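+
+For example (the flag comes from the note above; where the report ends up has not been verified):
+
+```sh
+cd /path/to/your/netdata
+
+nodejs node_modules/jasmine-node/bin/jasmine-node --captureExceptions --junitreport tests/node.d
+```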
+
+## CI
+
+The Karma and node.d runners can be integrated into Travis (AFAIK), but that is outside my ability.
+
+Note: Karma is for browser testing. On a build server, no GUI or browser may be available, unless the browser supports headless mode.
diff --git a/tests/node.d/fronius.chart.spec.js b/tests/node.d/fronius.chart.spec.js
new file mode 100644
index 000000000..5404e82f5
--- /dev/null
+++ b/tests/node.d/fronius.chart.spec.js
@@ -0,0 +1,161 @@
+"use strict";
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "chart",
+ module: this
+});
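+
+// The specs below use a `deleteProperties` helper that is defined in a shared
+// test helper file, not here. A minimal sketch of what it is assumed to do:
+// remove every own property from the shared singleton state between specs.
+//
+// function deleteProperties(obj) {
+//     Object.keys(obj).forEach(function (key) {
+//         delete obj[key];
+//     });
+// }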
+
+describe("fronius chart creation", function () {
+
+ var chartPrefix = "fronius_chart.";
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ afterAll(function () {
+ deleteProperties(subject.charts)
+ });
+
+ it("should return a basic chart dimension", function () {
+ var result = subject.createBasicDimension("id", "name", 2);
+
+ expect(result.divisor).toBe(2);
+ expect(result.id).toBe("id");
+ expect(result.algorithm).toEqual(netdata.chartAlgorithms.absolute);
+ expect(result.multiplier).toBe(1);
+ });
+
+ it("should return the power chart definition", function () {
+ var suffix = "power";
+ var result = subject.getSitePowerChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("power");
+ expect(result.context).toBe("fronius.power");
+ expect(result.dimensions[subject.powerGridId].name).toBe("grid");
+ expect(result.dimensions[subject.powerPvId].name).toBe("photovoltaics");
+ expect(result.dimensions[subject.powerAccuId].name).toBe("accumulator");
+ expect(Object.keys(result.dimensions).length).toBe(3);
+ });
+
+ it("should return the consumption chart definition", function () {
+ var suffix = "Load";
+ var result = subject.getSiteConsumptionChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("consumption");
+ expect(result.context).toBe("fronius.consumption");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.consumptionLoadId].name).toBe("load");
+ });
+
+ it("should return the autonomy chart definition", function () {
+ var suffix = "Autonomy";
+ var result = subject.getSiteAutonomyChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("%");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("autonomy");
+ expect(result.context).toBe("fronius.autonomy");
+ expect(Object.keys(result.dimensions).length).toBe(2);
+ expect(result.dimensions[subject.autonomyId].name).toBe("autonomy");
+ expect(result.dimensions[subject.consumptionSelfId].name).toBe("self_consumption");
+ });
+
+ it("should return the energy today chart definition", function () {
+ var suffix = "Energy today";
+ var result = subject.getSiteEnergyTodayChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.today");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyTodayId].name).toBe("today");
+ });
+
+ it("should return the energy year chart definition", function () {
+ var suffix = "Energy year";
+ var result = subject.getSiteEnergyYearChart(service, suffix);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("kWh");
+ expect(result.type).toBe(netdata.chartTypes.area);
+ expect(result.family).toBe("energy");
+ expect(result.context).toBe("fronius.energy.year");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[subject.energyYearId].name).toBe("year");
+ });
+
+ it("should return the inverter chart definition with a single numerical inverter", function () {
+ var inverters = {
+ "1": {}
+ };
+ var suffix = "numerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions["1"].name).toBe("inverter_1");
+ });
+
+ it("should return the inverter chart definition with a single alphabetical inverter", function () {
+ var key = "Cellar";
+ var inverters = {
+ "Cellar": {}
+ };
+ var suffix = "alphabetical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(1);
+ expect(result.dimensions[key].name).toBe(key);
+ });
+
+ it("should return the inverter chart definition with multiple alphanumerical inverter", function () {
+ var alpha = "Cellar";
+ var numerical = 1;
+ var inverters = {
+ "Cellar": {},
+ "1": {}
+ };
+ var suffix = "alphanumerical";
+ var result = subject.getInverterPowerChart(service, suffix, inverters);
+
+ expect(result.id).toBe(chartPrefix + suffix);
+ expect(result.units).toBe("W");
+ expect(result.type).toBe(netdata.chartTypes.stacked);
+ expect(result.family).toBe("inverters");
+ expect(result.context).toBe("fronius.inverter.output");
+ expect(Object.keys(result.dimensions).length).toBe(2);
+ expect(result.dimensions[alpha].name).toBe(alpha);
+ expect(result.dimensions[numerical].name).toBe("inverter_" + numerical);
+ });
+
+ it("should return the same chart definition on second call for lazy loading", function () {
+ var first = subject.getSitePowerChart(service, "id");
+ var second = subject.getSitePowerChart(service, "id");
+
+ expect(first).toBe(second);
+ });
+}); \ No newline at end of file
diff --git a/tests/node.d/fronius.parse.spec.js b/tests/node.d/fronius.parse.spec.js
new file mode 100644
index 000000000..9c371ad98
--- /dev/null
+++ b/tests/node.d/fronius.parse.spec.js
@@ -0,0 +1,305 @@
+"use strict";
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "parse",
+ module: this
+});
+
+var root = {
+ "Body": {
+ "Data": {
+ "Site": {},
+ "Inverters": {}
+ }
+ }
+};
+
+describe("fronius parsing for power chart", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 3000 for P_Grid when rounded", function () {
+ site.P_Grid = 2999.501;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(3000);
+ });
+
+ it("should return -3000 for P_Grid", function () {
+ site.P_Grid = -3000;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(-3000);
+ });
+
+ it("should return 0 for P_Grid if it is null", function () {
+ site.P_Grid = null;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Grid if it is zero", function () {
+ site.P_Grid = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.powerGridId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return -100 for P_Akku", function () {
+ // it is unclear whether negative values are possible for P_Akku (couldn't test this, and no API docs were found).
+ site.P_Akku = -100;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(-100);
+ });
+
+ it("should return 0 for P_Akku if it is null", function () {
+ site.P_Akku = null;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Akku if it is zero", function () {
+ site.P_Akku = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[2];
+
+ expect(result.name).toBe(subject.powerAccuId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100 for P_PV", function () {
+ site.P_PV = 100;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for P_PV if it is zero", function () {
+ site.P_PV = 0;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is null", function () {
+ site.P_PV = null;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_PV if it is negative", function () {
+ // solar panels shouldn't consume anything, only produce.
+ site.P_PV = -1;
+ var result = subject.parsePowerChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.powerPvId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for consumption", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 1000 for P_Load when rounded", function () {
+ site.P_Load = 1000.499;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(1000);
+ });
+
+ it("should return absolute value for P_Load when negative", function () {
+ /*
+ With firmware 3.7.4 it is sometimes possible that negative values are returned for P_Load,
+ which makes no sense: there is always some device consuming electricity around the clock.
+ The best we can do is use the absolute value, since 0 doesn't make much sense either.
+ This workaround seems to hold, as no strange peaks were observed during long-term testing.
+ */
+ site.P_Load = -50;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(50);
+ });
+
+ it("should return 0 for P_Load if it is null", function () {
+ site.P_Load = null;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for P_Load if it is zero", function () {
+ site.P_Load = 0;
+ var result = subject.parseConsumptionChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.consumptionLoadId);
+ expect(result.value).toBe(0);
+ });
+
+});
+
+describe("fronius parsing for autonomy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 100 for rel_Autonomy", function () {
+ site.rel_Autonomy = 100;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_Autonomy if it is zero", function () {
+ site.rel_Autonomy = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 0 for rel_Autonomy if it is null", function () {
+ site.rel_Autonomy = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 20 for rel_Autonomy if it is 20", function () {
+ site.rel_Autonomy = 20.1;
+ var result = subject.parseAutonomyChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.autonomyId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 20 for rel_SelfConsumption if it is 19.5", function () {
+ site.rel_SelfConsumption = 19.5;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(20);
+ });
+
+ it("should return 100 for rel_SelfConsumption if it is null", function () {
+ /*
+ During testing, the API was observed to deliver null when the solar panels do not
+ produce enough energy to supply the local load. In that case the value should be 100,
+ since all the produced energy is directly consumed.
+ */
+ site.rel_SelfConsumption = null;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(100);
+ });
+
+ it("should return 0 for rel_SelfConsumption if it is zero", function () {
+ site.rel_SelfConsumption = 0;
+ var result = subject.parseAutonomyChart(service, site).dimensions[1];
+
+ expect(result.name).toBe(subject.consumptionSelfId);
+ expect(result.value).toBe(0);
+ });
+});
+
+describe("fronius parsing for energy", function () {
+
+ var site = root.Body.Data.Site;
+
+ afterEach(function () {
+ deleteProperties(site);
+ });
+
+ it("should return 10000 for E_Day", function () {
+ site.E_Day = 10000;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(10000);
+ });
+
+ it("should return 0 for E_Day if it is negative", function () {
+ /*
+ The solar panels can't produce negative energy, really. It would be a fault of the API.
+ */
+ site.E_Day = -0.4;
+ var result = subject.parseEnergyTodayChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyTodayId);
+ expect(result.value).toBe(0);
+ });
+
+ it("should return 100'000 for E_Year", function () {
+ site.E_Year = 100000.4;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(100000);
+ });
+
+ it("should return 0 for E_Year if it is negative", function () {
+ /*
+ A return value of 0 only makes sense on New Year's Eve anyway, when the counter is reset.
+ A negative value, though, would be a fault of the API; it wouldn't make sense.
+ */
+ site.E_Year = -1;
+ var result = subject.parseEnergyYearChart(service, site).dimensions[0];
+
+ expect(result.name).toBe(subject.energyYearId);
+ expect(result.value).toBe(0);
+ });
+});
+
+describe("fronius parsing for inverters", function () {
+
+ var inverters = root.Body.Data.Inverters;
+
+ afterEach(function () {
+ deleteProperties(inverters);
+ });
+
+ it("should return 1000 for P for inverter with name", function () {
+ inverters["cellar"] = {
+ P: 1000
+ };
+ var result = subject.parseInverterChart(service, inverters).dimensions[0];
+
+ expect(result.name).toBe("cellar");
+ expect(result.value).toBe(1000);
+ });
+
+}); \ No newline at end of file
diff --git a/tests/node.d/fronius.process.spec.js b/tests/node.d/fronius.process.spec.js
new file mode 100644
index 000000000..daa84f390
--- /dev/null
+++ b/tests/node.d/fronius.process.spec.js
@@ -0,0 +1,74 @@
+"use strict";
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "process",
+ module: this
+});
+
+var exampleResponse = {
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter",
+ "P_Grid": -3430.729923,
+ "P_Load": -910.270077,
+ "P_Akku": null,
+ "P_PV": 4341,
+ "rel_SelfConsumption": 20.969133,
+ "rel_Autonomy": 100,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710,
+ "Meter_Location": "grid"
+ },
+ "Inverters": {
+ "1": {
+ "DT": 123,
+ "P": 4341,
+ "E_Day": 57230,
+ "E_Year": 6425915.5,
+ "E_Total": 15388710
+ }
+ }
+ }
+ }
+};
+
+describe("fronius main processing", function () {
+
+ beforeAll(function () {
+ // change this to enable debug log
+ netdata.options.DEBUG = false;
+ });
+
+ beforeEach(function () {
+ deleteProperties(subject.charts);
+ });
+
+ it("should send parsed values to netdata", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, exampleResponse);
+
+ expect(netdata.send.calls.count()).toBe(6);
+
+ // check if some parsed values were sent.
+ var powerChart = netdata.send.calls.argsFor(5)[0];
+
+ expect(powerChart).toContain("SET p_grid = -3431");
+ expect(powerChart).toContain("SET p_pv = 4341");
+
+ var inverterChart = netdata.send.calls.argsFor(0)[0];
+
+ expect(inverterChart).toContain("SET 1 = 4341");
+
+ var autonomyChart = netdata.send.calls.argsFor(3)[0];
+ expect(autonomyChart).toContain("SET rel_selfconsumption = 21");
+ });
+
+
+}); \ No newline at end of file
diff --git a/tests/node.d/fronius.validation.spec.js b/tests/node.d/fronius.validation.spec.js
new file mode 100644
index 000000000..08b7e430f
--- /dev/null
+++ b/tests/node.d/fronius.validation.spec.js
@@ -0,0 +1,154 @@
+"use strict";
+
+var netdata = require("../../node.d/node_modules/netdata");
+// remember: subject will be a singleton!
+var subject = require("../../node.d/fronius.node");
+
+var service = netdata.service({
+ name: "validation",
+ module: this
+});
+
+describe("fronius response validation", function () {
+
+ it("should do nothing if response is null", function () {
+ netdata.send = jasmine.createSpy("send");
+
+ subject.processResponse(service, null);
+ var result = netdata.send.calls.count();
+
+ expect(result).toBe(0);
+ });
+
+ it("should return null if response is null", function () {
+ var result = subject.convertToJson(null);
+
+ expect(result).toBeNull();
+ });
+
+ it("should return null and log error if response cannot be parsed", function () {
+ netdata.error = jasmine.createSpy("error");
+
+ // trailing commas are enough to create syntax exceptions
+ var result = subject.convertToJson("{name,}");
+
+ expect(result).toBeNull();
+ expect(netdata.error.calls.count()).toBe(1);
+ });
+
+ it("should return true if response is valid", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {
+ "Mode": "meter"
+ },
+ "Inverters": {
+ "1": {}
+ }
+ }
+ }
+ });
+
+ expect(result).toBeTruthy();
+ });
+
+ it("should return false if response is missing data", function () {
+ var result = subject.isResponseValid({
+ "Body": {}
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+ it("should return false if response is missing inverter", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Site": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+ it("should return false if response is missing inverter", function () {
+ var result = subject.isResponseValid({
+ "Body": {
+ "Data": {
+ "Inverters": {}
+ }
+ }
+ });
+
+ expect(result).toBeFalsy();
+ });
+
+});
+
+describe("fronius configuration validation", function () {
+
+ it("should return 0 if there are no servers configured", function () {
+ var result = subject.configure({});
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if the servers array is empty", function () {
+ var result = subject.configure({
+ "servers": []
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0 if there is one server configured incorrectly", function () {
+ var result = subject.configure({
+ "servers": [{}]
+ });
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 1 if there is one server configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name = "solar1";
+ var result = subject.configure({
+ "servers": [{
+ "name": name,
+ "api_path": "/api/",
+ "hostname": "solar1.local"
+ }]
+ });
+
+ expect(result).toBe(1);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name, "solar1.local/api/", 5);
+ });
+
+ it("should return 2 if there are two servers configured", function () {
+ subject.serviceExecute = jasmine.createSpy("serviceExecute");
+ var name1 = "solar 1";
+ var name2 = "solar 2";
+ var result = subject.configure({
+ "servers": [
+ {
+ "name": name1,
+ "api_path": "/",
+ "hostname": "solar1.local"
+ },
+ {
+ "name": name2,
+ "api_path": "/",
+ "hostname": "solar2.local",
+ "update_every": 3
+ }
+ ]
+ });
+
+ expect(result).toBe(2);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name1, "solar1.local/", 5);
+ expect(subject.serviceExecute).toHaveBeenCalledWith(name2, "solar2.local/", 3);
+ });
+
+}); \ No newline at end of file
diff --git a/tests/web/easypiechart.chart.spec.js b/tests/web/easypiechart.chart.spec.js
new file mode 100644
index 000000000..8f5e49631
--- /dev/null
+++ b/tests/web/easypiechart.chart.spec.js
@@ -0,0 +1,39 @@
+"use strict";
+
+
+// with xdescribe, this is skipped.
+describe("creation of easy pie charts", function () {
+
+ beforeAll(function () {
+ // karma stores the loaded files relative to "base/".
+ // This command is needed to load HTML fixtures
+ jasmine.getFixtures().fixturesPath = "base/tests/web/fixtures";
+ });
+
+ it("should create new chart, but it's failure is expected for demonstration purpose", function () {
+ // arrange
+ // Theoretically we can load some HTML. What about jQuery? Could this work?
+ // https://stackoverflow.com/questions/5337481/spying-on-jquery-selectors-in-jasmine
+ loadFixtures("easypiechart.chart.fixture1.html");
+
+ // for easy pie chart, we can fake the data result:
+ var data = {
+ result: [5]
+ };
+ // act
+ var result = NETDATA.easypiechartChartCreate(createState(), data);
+ // assert
+ expect(result).toBe(true);
+ });
+
+ function createState(min, max) {
+ // create a fake state with only needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+ }
+
+}); \ No newline at end of file
diff --git a/tests/web/easypiechart.percentage.spec.js b/tests/web/easypiechart.percentage.spec.js
new file mode 100644
index 000000000..e6168bdd7
--- /dev/null
+++ b/tests/web/easypiechart.percentage.spec.js
@@ -0,0 +1,142 @@
+"use strict";
+
+
+describe("percentage calculations for easy pie charts with dynamic range", function () {
+
+ it("should return positive value, if value greater than dynamic max", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 6, 2, 10);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return negative value, if value lesser than dynamic min", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -6, -10, 10);
+
+ expect(result).toBe(-60);
+ });
+
+ it("should return 0 if value is zero and min negative, max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -1, 2);
+
+ expect(result).toBe(0);
+ });
+
+ it("should return 0.1 if value and min are zero and max positive", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, 0, 2);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -0.1 if value is zero, max and min negative", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 0, -2, -1);
+
+ expect(result).toBe(-0.1);
+ });
+
+ it("should return positive value, if max is user-defined", function () {
+ var state = createState(null, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, -40, 50);
+
+ expect(result).toBe(92);
+ });
+
+ it("should return negative value, if min is user-defined", function () {
+ var state = createState(-50, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -46, -50, 40);
+
+ expect(result).toBe(-92);
+ });
+
+});
+
+describe("percentage calculations for easy pie charts with fixed range", function () {
+
+ it("should return positive value, if min and max are user-defined", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 46, 40, 50);
+
+ expect(result).toBe(60);
+ });
+
+ it("should return 100 if positive min and max are user-defined, but value is greater than max", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 60, 40, 50);
+
+ expect(result).toBe(100);
+ });
+
+ it("should return 0.1 if positive min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(40, 50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 39.9, 42, 48);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return -100 if negative min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -40, -50);
+
+ expect(result).toBe(-100);
+ });
+
+ it("should return 0.1 if negative min and max are user-defined, but value is smaller than min", function () {
+ var state = createState(-40, -50);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, -50.1, -20, -45);
+
+ expect(result).toBe(-100);
+ });
+});
+
+describe("percentage calculations for easy pie charts with invalid input", function () {
+
+ it("should return 0.1 if value undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, null, 40, 50);
+
+ expect(result).toBe(0.1);
+ });
+
+ it("should return positive value if min is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 1, null, 2);
+
+ expect(result).toBe(50);
+ });
+
+ it("should return positive if max is undefined", function () {
+ var state = createState(null, null);
+
+ var result = NETDATA.easypiechartPercentFromValueMinMax(state, 21, 42, null);
+
+ expect(result).toBe(50);
+ });
+});
+
+function createState(min, max) {
+ // create a fake state with only the needed properties.
+ return {
+ tmp: {
+ easyPieChartMin: min,
+ easyPieChartMax: max
+ }
+ };
+} \ No newline at end of file
diff --git a/tests/web/fixtures/easypiechart.chart.fixture1.html b/tests/web/fixtures/easypiechart.chart.fixture1.html
new file mode 100644
index 000000000..f0f4eb777
--- /dev/null
+++ b/tests/web/fixtures/easypiechart.chart.fixture1.html
@@ -0,0 +1,6 @@
+<div data-netdata="system.cpu"
+ data-chart-library="easypiechart"
+ data-width="5%"
+ data-height="20"
+ data-after="-30"
+></div> \ No newline at end of file
diff --git a/tests/web/karma.conf.js b/tests/web/karma.conf.js
new file mode 100644
index 000000000..b3ee0943d
--- /dev/null
+++ b/tests/web/karma.conf.js
@@ -0,0 +1,110 @@
+// Karma configuration
+// Generated on Sun Jul 16 2017 02:28:05 GMT+0200 (CEST)
+
+module.exports = function (config) {
+ config.set({
+
+ // base path that will be used to resolve all patterns (eg. files, exclude)
+ // this path should always resolve so that "." is the "netdata" root folder.
+ basePath: '../../',
+
+ // frameworks to use
+ // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
+ frameworks: ['jasmine'],
+
+
+ // list of files / patterns to load in the browser
+ files: [
+ // order matters! load jquery libraries first
+ 'web/lib/jquery*.js',
+ // our jasmine libs and fixtures
+ 'tests/web/lib/*.js',
+ 'tests/web/fixtures/*.html',
+ // then bootstrap
+ 'web/lib/bootstrap*.js',
+ // then the rest
+ 'web/lib/perfect-scrollbar*.js',
+ 'web/lib/dygraph*.js',
+ 'web/lib/gauge*.js',
+ 'web/lib/morris*.js',
+ 'web/lib/raphael*.js',
+ 'web/lib/tableExport*.js',
+ 'web/lib/d3*.js',
+ 'web/lib/c3*.js',
+ // some CSS
+ 'web/css/*.css',
+ 'web/dashboard.css',
+ // our dashboard
+ 'web/dashboard.js',
+ // finally our test specs
+ 'tests/web/*.spec.js',
+ ],
+
+
+ // list of files to exclude
+ exclude: [],
+
+
+ // preprocess matching files before serving them to the browser
+ // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
+ preprocessors: {
+ 'web/dashboard.js': ['coverage']
+ },
+
+
+ // test results reporter to use
+ // possible values: 'dots', 'progress'
+ // available reporters: https://npmjs.org/browse/keyword/karma-reporter
+ reporters: ['progress', 'coverage'],
+
+ // optionally, configure the reporter
+ coverageReporter: {
+ type : 'html',
+ dir : 'coverage/'
+ },
+
+ // web server port
+ port: 9876,
+
+
+ // enable / disable colors in the output (reporters and logs)
+ colors: true,
+
+
+ // level of logging
+ // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
+ logLevel: config.LOG_INFO,
+
+
+ // enable / disable watching file and executing tests whenever any file changes
+ autoWatch: false,
+ // not needed with WebStorm. Just hit Alt+Shift+R to rerun.
+
+ // start these browsers
+ // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
+ browsers: ['Chromium', 'ChromiumHeadless'],
+
+ customLaunchers: {
+ // Headless browsers could be useful for CI integration, if installed.
+ ChromiumHeadless: {
+ // needs Chrome/Chromium version >= 59
+ // see https://chromium.googlesource.com/chromium/src/+/lkgr/headless/README.md
+ base: "Chromium",
+ flags: [
+ "--headless",
+ "--disable-gpu",
+ // Without a remote debugging port, Chromium exits immediately.
+ "--remote-debugging-port=9222"
+ ]
+ }
+ },
+
+ // Continuous Integration mode
+ // if true, Karma captures browsers, runs the tests and exits
+ singleRun: false,
+
+ // Concurrency level
+ // how many browsers should be started simultaneously
+ concurrency: Infinity
+ })
+};
diff --git a/tests/web/lib/jasmine-jquery.js b/tests/web/lib/jasmine-jquery.js
new file mode 100644
index 000000000..6e4611c19
--- /dev/null
+++ b/tests/web/lib/jasmine-jquery.js
@@ -0,0 +1,841 @@
+/*!
+ Jasmine-jQuery: a set of jQuery helpers for Jasmine tests.
+
+ Version 2.1.1
+
+ https://github.com/velesin/jasmine-jquery
+
+ Copyright (c) 2010-2014 Wojciech Zawistowski, Travis Jeffery
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+(function (root, factory) {
+ if (typeof module !== 'undefined' && module.exports && typeof exports !== 'undefined') {
+ factory(root, root.jasmine, require('jquery'));
+ } else {
+ factory(root, root.jasmine, root.jQuery);
+ }
+}((function() {return this; })(), function (window, jasmine, $) { "use strict";
+
+ jasmine.spiedEventsKey = function (selector, eventName) {
+ return [$(selector).selector, eventName].toString()
+ }
+
+ jasmine.getFixtures = function () {
+ return jasmine.currentFixtures_ = jasmine.currentFixtures_ || new jasmine.Fixtures()
+ }
+
+ jasmine.getStyleFixtures = function () {
+ return jasmine.currentStyleFixtures_ = jasmine.currentStyleFixtures_ || new jasmine.StyleFixtures()
+ }
+
+ jasmine.Fixtures = function () {
+ this.containerId = 'jasmine-fixtures'
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.Fixtures.prototype.set = function (html) {
+ this.cleanUp()
+ return this.createContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.appendSet= function (html) {
+ this.addToContainer_(html)
+ }
+
+ jasmine.Fixtures.prototype.preload = function () {
+ this.read.apply(this, arguments)
+ }
+
+ jasmine.Fixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.appendLoad = function () {
+ this.addToContainer_(this.read.apply(this, arguments))
+ }
+
+ jasmine.Fixtures.prototype.read = function () {
+ var htmlChunks = []
+ , fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ htmlChunks.push(this.getFixtureHtml_(fixtureUrls[urlIndex]))
+ }
+
+ return htmlChunks.join('')
+ }
+
+ jasmine.Fixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.Fixtures.prototype.cleanUp = function () {
+ $('#' + this.containerId).remove()
+ }
+
+ jasmine.Fixtures.prototype.sandbox = function (attributes) {
+ var attributesToSet = attributes || {}
+ return $('<div id="sandbox" />').attr(attributesToSet)
+ }
+
+ jasmine.Fixtures.prototype.createContainer_ = function (html) {
+ var container = $('<div>')
+ .attr('id', this.containerId)
+ .html(html)
+
+ $(document.body).append(container)
+ return container
+ }
+
+ jasmine.Fixtures.prototype.addToContainer_ = function (html){
+ var container = $(document.body).find('#'+this.containerId).append(html)
+
+ if (!container.length) {
+ this.createContainer_(html)
+ }
+ }
+
+ jasmine.Fixtures.prototype.getFixtureHtml_ = function (url) {
+ if (typeof this.fixturesCache_[url] === 'undefined') {
+ this.loadFixtureIntoCache_(url)
+ }
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.Fixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.makeFixtureUrl_(relativeUrl)
+ , htmlText = ''
+ , request = $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ url: url,
+ dataType: 'html',
+ success: function (data, status, $xhr) {
+ htmlText = $xhr.responseText
+ }
+ }).fail(function ($xhr, status, err) {
+ throw new Error('Fixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ })
+
+ var scripts = $($.parseHTML(htmlText, true)).find('script[src]') || [];
+
+ scripts.each(function(){
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'script',
+ url: $(this).attr('src'),
+ success: function (data, status, $xhr) {
+ htmlText += '<script>' + $xhr.responseText + '</script>'
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('Script could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ });
+ })
+
+ self.fixturesCache_[relativeUrl] = htmlText;
+ }
+
+ jasmine.Fixtures.prototype.makeFixtureUrl_ = function (relativeUrl){
+ return this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+ }
+
+ jasmine.Fixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
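+
+  // illustrative usage sketch (not part of the library; the fixture file
+  // name 'chart.html' is hypothetical): HTML fixtures are read from
+  // fixturesPath and injected into a #jasmine-fixtures container
+  //
+  //   describe('chart legend', function () {
+  //     beforeEach(function () {
+  //       loadFixtures('chart.html')              // replaces the container
+  //       appendSetFixtures('<div id="x"></div>') // appends raw HTML to it
+  //     })
+  //     it('renders into the fixture', function () {
+  //       expect($('#x')).toExist()
+  //     })
+  //   })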
+
+
+ jasmine.StyleFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesNodes_ = []
+ this.fixturesPath = 'spec/javascripts/fixtures'
+ }
+
+ jasmine.StyleFixtures.prototype.set = function (css) {
+ this.cleanUp()
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.appendSet = function (css) {
+ this.createStyle_(css)
+ }
+
+ jasmine.StyleFixtures.prototype.preload = function () {
+ this.read_.apply(this, arguments)
+ }
+
+ jasmine.StyleFixtures.prototype.load = function () {
+ this.cleanUp()
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.appendLoad = function () {
+ this.createStyle_(this.read_.apply(this, arguments))
+ }
+
+ jasmine.StyleFixtures.prototype.cleanUp = function () {
+ while(this.fixturesNodes_.length) {
+ this.fixturesNodes_.pop().remove()
+ }
+ }
+
+ jasmine.StyleFixtures.prototype.createStyle_ = function (html) {
+ var styleText = $('<div></div>').html(html).text()
+ , style = $('<style>' + styleText + '</style>')
+
+ this.fixturesNodes_.push(style)
+ $('head').append(style)
+ }
+
+ jasmine.StyleFixtures.prototype.clearCache = jasmine.Fixtures.prototype.clearCache
+ jasmine.StyleFixtures.prototype.read_ = jasmine.Fixtures.prototype.read
+ jasmine.StyleFixtures.prototype.getFixtureHtml_ = jasmine.Fixtures.prototype.getFixtureHtml_
+ jasmine.StyleFixtures.prototype.loadFixtureIntoCache_ = jasmine.Fixtures.prototype.loadFixtureIntoCache_
+ jasmine.StyleFixtures.prototype.makeFixtureUrl_ = jasmine.Fixtures.prototype.makeFixtureUrl_
+ jasmine.StyleFixtures.prototype.proxyCallTo_ = jasmine.Fixtures.prototype.proxyCallTo_
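+
+  // illustrative usage sketch (not part of the library; 'base.css' is a
+  // hypothetical fixture name): style fixtures append <style> nodes to
+  // <head>, so layout-dependent matchers such as toBeVisible behave
+  // deterministically
+  //
+  //   setStyleFixtures('.hidden { display: none }')
+  //   loadStyleFixtures('base.css')   // fetched synchronously from fixturesPath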
+
+ jasmine.getJSONFixtures = function () {
+ return jasmine.currentJSONFixtures_ = jasmine.currentJSONFixtures_ || new jasmine.JSONFixtures()
+ }
+
+ jasmine.JSONFixtures = function () {
+ this.fixturesCache_ = {}
+ this.fixturesPath = 'spec/javascripts/fixtures/json'
+ }
+
+ jasmine.JSONFixtures.prototype.load = function () {
+ this.read.apply(this, arguments)
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.read = function () {
+ var fixtureUrls = arguments
+
+ for(var urlCount = fixtureUrls.length, urlIndex = 0; urlIndex < urlCount; urlIndex++) {
+ this.getFixtureData_(fixtureUrls[urlIndex])
+ }
+
+ return this.fixturesCache_
+ }
+
+ jasmine.JSONFixtures.prototype.clearCache = function () {
+ this.fixturesCache_ = {}
+ }
+
+ jasmine.JSONFixtures.prototype.getFixtureData_ = function (url) {
+ if (!this.fixturesCache_[url]) this.loadFixtureIntoCache_(url)
+ return this.fixturesCache_[url]
+ }
+
+ jasmine.JSONFixtures.prototype.loadFixtureIntoCache_ = function (relativeUrl) {
+ var self = this
+ , url = this.fixturesPath.match('/$') ? this.fixturesPath + relativeUrl : this.fixturesPath + '/' + relativeUrl
+
+ $.ajax({
+ async: false, // must be synchronous to guarantee that no tests are run before fixture is loaded
+ cache: false,
+ dataType: 'json',
+ url: url,
+ success: function (data) {
+ self.fixturesCache_[relativeUrl] = data
+ },
+ error: function ($xhr, status, err) {
+ throw new Error('JSONFixture could not be loaded: ' + url + ' (status: ' + status + ', message: ' + err.message + ')')
+ }
+ })
+ }
+
+ jasmine.JSONFixtures.prototype.proxyCallTo_ = function (methodName, passedArguments) {
+ return this[methodName].apply(this, passedArguments)
+ }
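+
+  // illustrative usage sketch (not part of the library; 'data.json' is a
+  // hypothetical fixture name): JSON fixtures are fetched synchronously and
+  // cached per relative URL
+  //
+  //   var all = loadJSONFixtures('data.json')   // returns the whole cache
+  //   var data = getJSONFixture('data.json')    // === all['data.json']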
+
+ jasmine.jQuery = function () {}
+
+ jasmine.jQuery.browserTagCaseIndependentHtml = function (html) {
+ return $('<div/>').append(html).html()
+ }
+
+ jasmine.jQuery.elementToString = function (element) {
+ return $(element).map(function () { return this.outerHTML; }).toArray().join(', ')
+ }
+
+ var data = {
+ spiedEvents: {}
+ , handlers: []
+ }
+
+ jasmine.jQuery.events = {
+ spyOn: function (selector, eventName) {
+ var handler = function (e) {
+ var calls = (typeof data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] !== 'undefined') ? data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] = {
+ args: jasmine.util.argsToArray(arguments),
+ calls: ++calls
+ }
+ }
+
+ $(selector).on(eventName, handler)
+ data.handlers.push(handler)
+
+ return {
+ selector: selector,
+ eventName: eventName,
+ handler: handler,
+ reset: function (){
+ delete data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ },
+ calls: {
+ count: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : 0;
+ },
+ any: function () {
+ return data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)] ?
+ !!data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].calls : false;
+ }
+ }
+ }
+ },
+
+ args: function (selector, eventName) {
+ var actualArgs = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)].args
+
+ if (!actualArgs) {
+ throw "There is no spy for " + eventName + " on " + selector.toString() + ". Make sure to create a spy using spyOnEvent."
+ }
+
+ return actualArgs
+ },
+
+ wasTriggered: function (selector, eventName) {
+ return !!(data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)])
+ },
+
+ wasTriggeredWith: function (selector, eventName, expectedArgs, util, customEqualityTesters) {
+ var actualArgs = jasmine.jQuery.events.args(selector, eventName).slice(1)
+
+ if (Object.prototype.toString.call(expectedArgs) !== '[object Array]')
+ actualArgs = actualArgs[0]
+
+ return util.equals(actualArgs, expectedArgs, customEqualityTesters)
+ },
+
+ wasPrevented: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isDefaultPrevented()
+ },
+
+ wasStopped: function (selector, eventName) {
+ var spiedEvent = data.spiedEvents[jasmine.spiedEventsKey(selector, eventName)]
+ , args = (jasmine.util.isUndefined(spiedEvent)) ? {} : spiedEvent.args
+ , e = args ? args[0] : undefined
+
+ return e && e.isPropagationStopped()
+ },
+
+ cleanUp: function () {
+ data.spiedEvents = {}
+ data.handlers = []
+ }
+ }
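+
+  // illustrative usage sketch (not part of the library): spyOn registers a
+  // recording handler keyed by (selector, event) without suppressing the event
+  //
+  //   var spy = spyOnEvent('#button', 'click')
+  //   $('#button').trigger('click')
+  //   spy.calls.count()                          // 1
+  //   // or: expect('click').toHaveBeenTriggeredOn('#button')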
+
+ var hasProperty = function (actualValue, expectedValue) {
+ if (expectedValue === undefined)
+ return actualValue !== undefined
+
+ return actualValue === expectedValue
+ }
+
+ beforeEach(function () {
+ jasmine.addMatchers({
+ toHaveClass: function () {
+ return {
+ compare: function (actual, className) {
+ return { pass: $(actual).hasClass(className) }
+ }
+ }
+ },
+
+ toHaveCss: function () {
+ return {
+ compare: function (actual, css) {
+ var stripCharsRegex = /[\s;\"\']/g
+ for (var prop in css) {
+ var value = css[prop]
+ // see issue #147 on gh
+ ;if ((value === 'auto') && ($(actual).get(0).style[prop] === 'auto')) continue
+ var actualStripped = $(actual).css(prop).replace(stripCharsRegex, '')
+ var valueStripped = value.replace(stripCharsRegex, '')
+ if (actualStripped !== valueStripped) return { pass: false }
+ }
+ return { pass: true }
+ }
+ }
+ },
+
+ toBeVisible: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':visible') }
+ }
+ }
+ },
+
+ toBeHidden: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':hidden') }
+ }
+ }
+ },
+
+ toBeSelected: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':selected') }
+ }
+ }
+ },
+
+ toBeChecked: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':checked') }
+ }
+ }
+ },
+
+ toBeEmpty: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).is(':empty') }
+ }
+ }
+ },
+
+ toBeInDOM: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $.contains(document.documentElement, $(actual)[0]) }
+ }
+ }
+ },
+
+ toExist: function () {
+ return {
+ compare: function (actual) {
+ return { pass: $(actual).length }
+ }
+ }
+ },
+
+ toHaveLength: function () {
+ return {
+ compare: function (actual, length) {
+ return { pass: $(actual).length === length }
+ }
+ }
+ },
+
+ toHaveAttr: function () {
+ return {
+ compare: function (actual, attributeName, expectedAttributeValue) {
+ return { pass: hasProperty($(actual).attr(attributeName), expectedAttributeValue) }
+ }
+ }
+ },
+
+ toHaveProp: function () {
+ return {
+ compare: function (actual, propertyName, expectedPropertyValue) {
+ return { pass: hasProperty($(actual).prop(propertyName), expectedPropertyValue) }
+ }
+ }
+ },
+
+ toHaveId: function () {
+ return {
+ compare: function (actual, id) {
+ return { pass: $(actual).attr('id') == id }
+ }
+ }
+ },
+
+ toHaveHtml: function () {
+ return {
+ compare: function (actual, html) {
+ return { pass: $(actual).html() == jasmine.jQuery.browserTagCaseIndependentHtml(html) }
+ }
+ }
+ },
+
+ toContainHtml: function () {
+ return {
+ compare: function (actual, html) {
+ var actualHtml = $(actual).html()
+ , expectedHtml = jasmine.jQuery.browserTagCaseIndependentHtml(html)
+
+ return { pass: (actualHtml.indexOf(expectedHtml) >= 0) }
+ }
+ }
+ },
+
+ toHaveText: function () {
+ return {
+ compare: function (actual, text) {
+ var actualText = $(actual).text()
+ var trimmedText = $.trim(actualText)
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(actualText) || text.test(trimmedText) }
+ } else {
+ return { pass: (actualText == text || trimmedText == text) }
+ }
+ }
+ }
+ },
+
+ toContainText: function () {
+ return {
+ compare: function (actual, text) {
+ var trimmedText = $.trim($(actual).text())
+
+ if (text && $.isFunction(text.test)) {
+ return { pass: text.test(trimmedText) }
+ } else {
+ return { pass: trimmedText.indexOf(text) != -1 }
+ }
+ }
+ }
+ },
+
+ toHaveValue: function () {
+ return {
+ compare: function (actual, value) {
+ return { pass: $(actual).val() === value }
+ }
+ }
+ },
+
+ toHaveData: function () {
+ return {
+ compare: function (actual, key, expectedValue) {
+ return { pass: hasProperty($(actual).data(key), expectedValue) }
+ }
+ }
+ },
+
+ toContainElement: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).find(selector).length }
+ }
+ }
+ },
+
+ toBeMatchedBy: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).filter(selector).length }
+ }
+ }
+ },
+
+ toBeDisabled: function () {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual).is(':disabled') }
+ }
+ }
+ },
+
+ toBeFocused: function (selector) {
+ return {
+ compare: function (actual, selector) {
+ return { pass: $(actual)[0] === $(actual)[0].ownerDocument.activeElement }
+ }
+ }
+ },
+
+ toHandle: function () {
+ return {
+ compare: function (actual, event) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var events = $._data($(actual).get(0), "events")
+
+ if (!events || !event || typeof event !== "string") {
+ return { pass: false }
+ }
+
+ var namespaces = event.split(".")
+ , eventType = namespaces.shift()
+ , sortedNamespaces = namespaces.slice(0).sort()
+ , namespaceRegExp = new RegExp("(^|\\.)" + sortedNamespaces.join("\\.(?:.*\\.)?") + "(\\.|$)")
+
+ if (events[eventType] && namespaces.length) {
+ for (var i = 0; i < events[eventType].length; i++) {
+ var namespace = events[eventType][i].namespace
+
+ if (namespaceRegExp.test(namespace))
+ return { pass: true }
+ }
+ } else {
+ return { pass: (events[eventType] && events[eventType].length > 0) }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
+ toHandleWith: function () {
+ return {
+ compare: function (actual, eventName, eventHandler) {
+ if ( !actual || actual.length === 0 ) return { pass: false };
+ var normalizedEventName = eventName.split('.')[0]
+ , stack = $._data($(actual).get(0), "events")[normalizedEventName]
+
+ for (var i = 0; i < stack.length; i++) {
+ if (stack[i].handler == eventHandler) return { pass: true }
+ }
+
+ return { pass: false }
+ }
+ }
+ },
+
+ toHaveBeenTriggeredOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasTriggered(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + $(actual) + " not to have been triggered on " + selector :
+ "Expected event " + $(actual) + " to have been triggered on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenTriggered: function (){
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasTriggered(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been triggered on " + selector :
+ "Expected event " + eventName + " to have been triggered on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenTriggeredOnAndWith: function (j$, customEqualityTesters) {
+ return {
+ compare: function (actual, selector, expectedArgs) {
+ var wasTriggered = jasmine.jQuery.events.wasTriggered(selector, actual)
+ , result = { pass: wasTriggered && jasmine.jQuery.events.wasTriggeredWith(selector, actual, expectedArgs, j$, customEqualityTesters) }
+
+ if (wasTriggered) {
+ var actualArgs = jasmine.jQuery.events.args(selector, actual, expectedArgs)[1]
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs) :
+ "Expected event " + actual + " to have been triggered with " + jasmine.pp(expectedArgs) + " but it was triggered with " + jasmine.pp(actualArgs)
+
+ } else {
+ // todo check on this
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been triggered on " + selector :
+ "Expected event " + actual + " to have been triggered on " + selector
+ }
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPreventedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasPrevented(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been prevented on " + selector :
+ "Expected event " + actual + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenPrevented: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasPrevented(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been prevented on " + selector :
+ "Expected event " + eventName + " to have been prevented on " + selector
+
+ return result
+ }
+ }
+ },
+
+ toHaveBeenStoppedOn: function () {
+ return {
+ compare: function (actual, selector) {
+ var result = { pass: jasmine.jQuery.events.wasStopped(selector, actual) }
+
+ result.message = result.pass ?
+ "Expected event " + actual + " not to have been stopped on " + selector :
+ "Expected event " + actual + " to have been stopped on " + selector
+
+ return result;
+ }
+ }
+ },
+
+ toHaveBeenStopped: function () {
+ return {
+ compare: function (actual) {
+ var eventName = actual.eventName
+ , selector = actual.selector
+ , result = { pass: jasmine.jQuery.events.wasStopped(selector, eventName) }
+
+ result.message = result.pass ?
+ "Expected event " + eventName + " not to have been stopped on " + selector :
+ "Expected event " + eventName + " to have been stopped on " + selector
+
+ return result
+ }
+ }
+ }
+ })
+
+ jasmine.getEnv().addCustomEqualityTester(function(a, b) {
+ if (a && b) {
+ if (a instanceof $ || jasmine.isDomNode(a)) {
+ var $a = $(a)
+
+ if (b instanceof $)
+ return $a.length == b.length && $a.is(b)
+
+ return $a.is(b);
+ }
+
+ if (b instanceof $ || jasmine.isDomNode(b)) {
+ var $b = $(b)
+
+ if (a instanceof $)
+ return a.length == $b.length && $b.is(a)
+
+ return $b.is(a);
+ }
+ }
+ })
+
+ jasmine.getEnv().addCustomEqualityTester(function (a, b) {
+ if (a instanceof $ && b instanceof $ && a.size() == b.size())
+ return a.is(b)
+ })
+ })
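+
+  // illustrative usage sketch (not part of the library) for the matchers
+  // registered above
+  //
+  //   setFixtures('<input id="name" value="bob" disabled>')
+  //   expect($('#name')).toHaveValue('bob')
+  //   expect($('#name')).toBeDisabled()
+  //   expect($('#name')).toHaveAttr('id', 'name')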
+
+ afterEach(function () {
+ jasmine.getFixtures().cleanUp()
+ jasmine.getStyleFixtures().cleanUp()
+ jasmine.jQuery.events.cleanUp()
+ })
+
+ window.readFixtures = function () {
+ return jasmine.getFixtures().proxyCallTo_('read', arguments)
+ }
+
+ window.preloadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setFixtures = function (html) {
+ return jasmine.getFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetFixtures = function () {
+ jasmine.getFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
+ window.sandbox = function (attributes) {
+ return jasmine.getFixtures().sandbox(attributes)
+ }
+
+ window.spyOnEvent = function (selector, eventName) {
+ return jasmine.jQuery.events.spyOn(selector, eventName)
+ }
+
+ window.preloadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('preload', arguments)
+ }
+
+ window.loadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.appendLoadStyleFixtures = function () {
+ jasmine.getStyleFixtures().proxyCallTo_('appendLoad', arguments)
+ }
+
+ window.setStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('set', arguments)
+ }
+
+ window.appendSetStyleFixtures = function (html) {
+ jasmine.getStyleFixtures().proxyCallTo_('appendSet', arguments)
+ }
+
+ window.loadJSONFixtures = function () {
+ return jasmine.getJSONFixtures().proxyCallTo_('load', arguments)
+ }
+
+ window.getJSONFixture = function (url) {
+ return jasmine.getJSONFixtures().proxyCallTo_('read', arguments)[url]
+ }
+}));
diff --git a/web/dashboard.css b/web/dashboard.css
index 2147c6038..42ffa3ddb 100644
--- a/web/dashboard.css
+++ b/web/dashboard.css
@@ -37,6 +37,7 @@ body {
justify-content: center;
-webkit-justify-content: center;
-moz-justify-content: center;
+ padding-top: 10px;
}
.netdata-container {
diff --git a/web/dashboard.html b/web/dashboard.html
index 4453c996e..1e482daba 100644
--- a/web/dashboard.html
+++ b/web/dashboard.html
@@ -652,4 +652,4 @@ So, to avoid flashing the charts, we destroy and re-create the charts on each up
<!-- <script> netdataServer = "http://box:19999"; </script> -->
<!-- load the dashboard manager - it will do the rest -->
-<script type="text/javascript" src="dashboard.js?v20170715-1"></script>
+<script type="text/javascript" src="dashboard.js?v20170815-15"></script>
diff --git a/web/dashboard.js b/web/dashboard.js
index 1f240a4c8..f119a5370 100644
--- a/web/dashboard.js
+++ b/web/dashboard.js
@@ -51,7 +51,7 @@
var NETDATA = window.NETDATA || {};
-(function(window, document) {
+(function(window, document, $, undefined) {
// ------------------------------------------------------------------------
// compatibility fixes
@@ -144,7 +144,7 @@ var NETDATA = window.NETDATA || {};
NETDATA.themes = {
white: {
bootstrap_css: NETDATA.serverDefault + 'css/bootstrap-3.3.7.css',
- dashboard_css: NETDATA.serverDefault + 'dashboard.css?v20170605-2',
+ dashboard_css: NETDATA.serverDefault + 'dashboard.css?v20170725-1',
background: '#FFFFFF',
foreground: '#000000',
grid: '#F0F0F0',
@@ -161,7 +161,7 @@ var NETDATA = window.NETDATA || {};
},
slate: {
bootstrap_css: NETDATA.serverDefault + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1',
- dashboard_css: NETDATA.serverDefault + 'dashboard.slate.css?v20170605-2',
+ dashboard_css: NETDATA.serverDefault + 'dashboard.slate.css?v20170725-1',
background: '#272b30',
foreground: '#C8C8C8',
grid: '#283236',
@@ -2554,11 +2554,14 @@ var NETDATA = window.NETDATA || {};
else
delta = Math.abs(max - min);
- if (delta > 1000) __legendFormatValueChartDecimals = 0;
- else if (delta > 10) __legendFormatValueChartDecimals = 1;
- else if (delta > 1) __legendFormatValueChartDecimals = 2;
- else if (delta > 0.1) __legendFormatValueChartDecimals = 2;
- else __legendFormatValueChartDecimals = 4;
+ if (delta > 1000) __legendFormatValueChartDecimals = 0;
+ else if (delta > 10) __legendFormatValueChartDecimals = 1;
+ else if (delta > 1) __legendFormatValueChartDecimals = 2;
+ else if (delta > 0.1) __legendFormatValueChartDecimals = 2;
+ else if (delta > 0.01) __legendFormatValueChartDecimals = 4;
+ else if (delta > 0.001) __legendFormatValueChartDecimals = 5;
+ else if (delta > 0.0001) __legendFormatValueChartDecimals = 6;
+ else __legendFormatValueChartDecimals = 7;
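+                    // e.g. a chart whose visible min..max span is delta = 0.005
+                    // falls in the (0.001, 0.01] bucket above and is rendered
+                    // with 5 decimal digits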
}
if(__legendFormatValueChartDecimals !== old) {
@@ -2585,11 +2588,14 @@ var NETDATA = window.NETDATA || {};
else {
dmin = 0;
var abs = (value < 0) ? -value : value;
- if (abs > 1000) dmax = 0;
- else if (abs > 10) dmax = 1;
- else if (abs > 1) dmax = 2;
- else if (abs > 0.1) dmax = 2;
- else dmax = 4;
+ if (abs > 1000) dmax = 0;
+ else if (abs > 10) dmax = 1;
+ else if (abs > 1) dmax = 2;
+ else if (abs > 0.1) dmax = 2;
+ else if (abs > 0.01) dmax = 4;
+ else if (abs > 0.001) dmax = 5;
+ else if (abs > 0.0001) dmax = 6;
+ else dmax = 7;
}
return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
@@ -3926,14 +3932,20 @@ var NETDATA = window.NETDATA || {};
// script.onabort = onError;
script.onerror = function() { NETDATA.error(101, NETDATA.jQuery); };
- if(typeof callback === "function")
- script.onload = callback;
+ if(typeof callback === "function") {
+ script.onload = function () {
+ $ = jQuery;
+ return callback();
+ };
+ }
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(script, s);
}
- else if(typeof callback === "function")
+ else if(typeof callback === "function") {
+ $ = jQuery;
return callback();
+ }
};
NETDATA._loadCSS = function(filename) {
@@ -5557,23 +5569,34 @@ var NETDATA = window.NETDATA || {};
if(typeof min !== 'number') min = 0;
if(typeof max !== 'number') max = 0;
+ if(min > max) {
+ var t = min;
+ min = max;
+ max = t;
+ }
+
if(min > value) min = value;
if(max < value) max = value;
- // make sure it is zero based
- // but only they have not been set by the user
if(state.tmp.easyPieChartMin === null && min > 0) min = 0;
if(state.tmp.easyPieChartMax === null && max < 0) max = 0;
- var pcent = 0;
- if(value >= 0) {
- if(max !== 0)
- pcent = Math.round(value * 100 / max);
+ var pcent;
+
+ if(min < 0 && max > 0) {
+ // it is both positive and negative
+ // zero at the top center of the chart
+ max = (-min > max)? -min : max;
+ pcent = Math.round(value * 100 / max);
+ }
+ else if(value >= 0 && min >= 0 && max >= 0) {
+ // clockwise
+ pcent = Math.round((value - min) * 100 / (max - min));
if(pcent === 0) pcent = 0.1;
}
else {
- if(min !== 0)
- pcent = Math.round(-value * 100 / min);
+ // counter clockwise
+ pcent = Math.round((value - max) * 100 / (max - min));
if(pcent === 0) pcent = -0.1;
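+                // worked examples (illustrative): min = -4, max = 10, value = -2
+                // takes the mixed-sign branch (max stays 10, pcent = round(-200 / 10)
+                // = -20), while min = 0, max = 10, value = 5 takes the clockwise
+                // branch (pcent = round(500 / 10) = 50)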
}
@@ -7069,4 +7092,4 @@ var NETDATA = window.NETDATA || {};
}
});
});
-})(window, document);
+})(window, document, (typeof jQuery === 'function')?jQuery:undefined);
diff --git a/web/dashboard.slate.css b/web/dashboard.slate.css
index f12a6aab9..7445d532c 100644
--- a/web/dashboard.slate.css
+++ b/web/dashboard.slate.css
@@ -51,6 +51,7 @@ code {
justify-content: center;
-moz--webkit-justify-content: center;
-moz-justify-content: center;
+ padding-top: 10px;
}
.netdata-container {
diff --git a/web/dashboard_info.js b/web/dashboard_info.js
index 91e007a1d..bb2f95991 100644
--- a/web/dashboard_info.js
+++ b/web/dashboard_info.js
@@ -291,6 +291,12 @@ netdataDashboard.menu = {
info: undefined
},
+ 'stiebeleltron': {
+ title: 'Stiebel Eltron',
+ icon: '<i class="fa fa-thermometer-full" aria-hidden="true"></i>',
+ info: undefined
+ },
+
'snmp': {
title: 'SNMP',
icon: '<i class="fa fa-random" aria-hidden="true"></i>',
@@ -301,6 +307,11 @@ netdataDashboard.menu = {
title: 'Go - expvars',
icon: '<i class="fa fa-eye" aria-hidden="true"></i>',
info: 'Statistics about running Go applications exposed by the <a href="https://golang.org/pkg/expvar/" target="_blank">expvar package</a>.'
+ },
+
+ 'chrony': {
+ icon: '<i class="fa fa-clock-o" aria-hidden="true"></i>',
+ info: 'chronyd parameters about the system’s clock performance.'
}
};
@@ -470,7 +481,7 @@ netdataDashboard.context = {
'system.entropy': {
colors: '#CC22AA',
- info: '<a href="https://en.wikipedia.org/wiki/Entropy_(computing)" target="_blank">Entropy</a>, is like a pool of random numbers (<a href="https://en.wikipedia.org/wiki//dev/random" target="_blank">/dev/random</a>) that are mainly used in cryptography. It is advised that the pool remains always <a href="https://blog.cloudflare.com/ensuring-randomness-with-linuxs-random-number-generator/" target="_blank">above 200</a>. If the pool of entropy gets empty, you risk your security to be predictable and you should install a user-space random numbers generating daemon, like <code>haveged</code> or <code>rng-tools</code> (i.e. <b>rngd</b>), to keep the pool in healthy levels.'
+        info: '<a href="https://en.wikipedia.org/wiki/Entropy_(computing)" target="_blank">Entropy</a> is a pool of random numbers (<a href="https://en.wikipedia.org/wiki//dev/random" target="_blank">/dev/random</a>) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like <code>haveged</code>, although these are generally useful only in servers.'
},
'system.forks': {
@@ -589,7 +600,7 @@ netdataDashboard.context = {
},
'mem.kernel': {
- info: 'The total ammount of memory being used by the kernel. <b>Slab</b> is the amount of memory used by the kernel to cache data structures for its own use. <b>KernelStack</b> is the amount of memory allocated for each task done by the kernel. <b>PageTables</b> is the amount of memory decicated to the lowest level of page tables (A page table is used to turn a virtual address into a physical memory address). <b>VmallocUsed</b> is the amount of memory being used as virtual address space.'
+        info: 'The total amount of memory being used by the kernel. <b>Slab</b> is the amount of memory used by the kernel to cache data structures for its own use. <b>KernelStack</b> is the amount of memory allocated for each task done by the kernel. <b>PageTables</b> is the amount of memory dedicated to the lowest level of page tables (a page table is used to turn a virtual address into a physical memory address). <b>VmallocUsed</b> is the amount of memory being used as virtual address space.'
},
'mem.slab': {
@@ -703,9 +714,53 @@ netdataDashboard.context = {
// NETWORK INTERFACES
'net.net': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ if(id.match(/^cgroup_.*/)) {
+ var iface;
+ try {
+ iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length);
+ }
+ catch (e) {
+ iface = '';
+ }
+ return netdataDashboard.gaugeChart('Received' + iface, '12%', 'received');
+ }
+ else
+ return '';
+ },
+ function(os, id) {
+ void(os);
+ if(id.match(/^cgroup_.*/)) {
+ var iface;
+ try {
+ iface = ' ' + id.substring(id.lastIndexOf('.net_') + 5, id.length);
+ }
+ catch (e) {
+ iface = '';
+ }
+ return netdataDashboard.gaugeChart('Sent' + iface, '12%', 'sent');
+ }
+ else
+ return '';
+ }
+ ],
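+        // note (illustrative): for a hypothetical cgroup chart id like
+        // 'cgroup_myvm.net_eth0', the substring after '.net_' yields 'eth0',
+        // so the gauges above are titled 'Received eth0' / 'Sent eth0';
+        // non-cgroup charts return '' here and use the plain gauges in heads below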
heads: [
- netdataDashboard.gaugeChart('Received', '12%', 'received'),
- netdataDashboard.gaugeChart('Sent', '12%', 'sent')
+ function(os, id) {
+ void(os);
+ if(!id.match(/^cgroup_.*/))
+ return netdataDashboard.gaugeChart('Received', '12%', 'received');
+ else
+ return '';
+ },
+ function(os, id) {
+ void(os);
+ if(!id.match(/^cgroup_.*/))
+ return netdataDashboard.gaugeChart('Sent', '12%', 'sent');
+ else
+ return '';
+ }
]
},
@@ -1020,6 +1075,83 @@ netdataDashboard.context = {
height: 0.5
},
+
+ // ------------------------------------------------------------------------
+ // containers
+
+ 'cgroup.cpu': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="CPU"'
+ + ' data-units="%"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'cgroup.mem_usage': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Memory"'
+ + ' data-units="MB"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'cgroup.throttle_io': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="read"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Read Disk I/O"'
+ + ' data-units="KB/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' role="application"></div>';
+ },
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="write"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Write Disk I/O"'
+ + ' data-units="KB/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
// ------------------------------------------------------------------------
// web_log
@@ -1353,5 +1485,54 @@ netdataDashboard.context = {
commonMin: true,
commonMax: true,
valueRange: "[0, null]"
+ },
+
+ // ------------------------------------------------------------------------
+ // Stiebel Eltron Heat pump installation
+
+ 'stiebeleltron.system.roomtemp': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, null]"
+ },
+
+ // ------------------------------------------------------------------------
+
+ 'chrony.system': {
+        info: 'In normal operation, chronyd never steps the system clock, because any jump in the timescale can have adverse consequences for certain application programs. Instead, any error in the system clock is corrected by slightly speeding up or slowing down the system clock until the error has been removed, and then returning to the system clock’s normal speed. A consequence of this is that there will be a period when the system clock (as read by other programs using the <code>gettimeofday()</code> system call, or by the <code>date</code> command in the shell) will be different from chronyd\'s estimate of the current true time (which it reports to NTP clients when it is operating in server mode). The value reported here is the difference due to this effect.',
+ colors: NETDATA.colors[3]
+ },
+
+ 'chrony.offsets': {
+ info: '<code>last offset</code> is the estimated local offset on the last clock update. <code>RMS offset</code> is a long-term average of the offset value.',
+ height: 0.5
+ },
+
+ 'chrony.stratum': {
+        info: 'The <code>stratum</code> indicates how many hops away this computer is from a computer with an attached reference clock. Such a computer is a stratum-1 computer.',
+ decimalDigits: 0,
+ height: 0.5
+ },
+
+ 'chrony.root': {
+ info: 'Estimated delays against the root time server this system is synchronized with. <code>delay</code> is the total of the network path delays to the stratum-1 computer from which the computer is ultimately synchronised. <code>dispersion</code> is the total dispersion accumulated through all the computers back to the stratum-1 computer from which the computer is ultimately synchronised. Dispersion is due to system clock resolution, statistical measurement variations etc.'
+ },
+
+ 'chrony.frequency': {
+        info: 'The <code>frequency</code> is the rate by which the system\'s clock would be wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). For example, a value of 1ppm would mean that when the system\'s clock thinks it has advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.',
+ colors: NETDATA.colors[0]
+ },
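+
+    // worked example (illustrative): a frequency error of 1 ppm left
+    // uncorrected accumulates 86400 s/day * 1e-6 = 0.0864 s, i.e. roughly
+    // 86 ms of clock drift per day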
+
+ 'chrony.residualfreq': {
+        info: 'This shows the <code>residual frequency</code> for the currently selected reference source. It reflects any difference between what the measurements from the reference source indicate the frequency should be and the frequency currently being used. ' +
+            'The reason this is not always zero is that a smoothing procedure is applied to the frequency. Each time a measurement from the reference source is obtained and a new residual frequency computed, the estimated accuracy of this residual is compared with the estimated accuracy (see <code>skew</code>) of the existing frequency value. A weighted average is computed for the new frequency, with weights depending on these accuracies. If the measurements from the reference source follow a consistent trend, the residual will be driven to zero over time.',
+ height: 0.5,
+ colors: NETDATA.colors[3]
+ },
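+
+    // minimal sketch (illustrative, not chronyd's actual code) of the
+    // weighted-average update described above, where each weight is the
+    // inverse square of the corresponding estimated accuracy:
+    //
+    //   function smoothFrequency(freq, freqAccuracy, measured, measAccuracy) {
+    //       var wOld = 1 / (freqAccuracy * freqAccuracy);
+    //       var wNew = 1 / (measAccuracy * measAccuracy);
+    //       return (wOld * freq + wNew * measured) / (wOld + wNew);
+    //   }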
+
+ 'chrony.skew': {
+ info: 'The estimated error bound on the frequency.',
+ height: 0.5,
+ colors: NETDATA.colors[5]
}
};
diff --git a/web/goto-host-from-alarm.html b/web/goto-host-from-alarm.html
index 16ed2b92a..40592134b 100644
--- a/web/goto-host-from-alarm.html
+++ b/web/goto-host-from-alarm.html
@@ -18,7 +18,7 @@
var netdataTheme = 'slate';
var netdataShowHelp = true;
</script>
-<script type="text/javascript" src="dashboard.js?v20170105-7"></script>
+<script type="text/javascript" src="dashboard.js?v20170724-7"></script>
<script>
var urlOptions = {
diff --git a/web/index.html b/web/index.html
index be944e34d..cd8239d26 100644
--- a/web/index.html
+++ b/web/index.html
@@ -1182,6 +1182,13 @@
return def * this.context[id].height;
else
return def;
+ },
+
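+                // looks up an optional per-context decimalDigits attribute,
+                // e.g. contextDecimalDigits('chrony.stratum', -1) returns 0
+                // because that context declares decimalDigits: 0 above, while
+                // contexts without the attribute fall back to def (-1)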
+ contextDecimalDigits: function(id, def) {
+ if(typeof this.context[id] !== 'undefined' && typeof this.context[id].decimalDigits !== 'undefined')
+ return this.context[id].decimalDigits;
+ else
+ return def;
}
};
@@ -1514,6 +1521,7 @@
+ ' data-after="-' + duration.toString() + '"'
+ ' data-id="' + NETDATA.name2id(options.hostname + '/' + chart.id) + '"'
+ ' data-colors="' + netdataDashboard.anyAttribute(netdataDashboard.context, 'colors', chart.context, '') + '"'
+ + ' data-decimal-digits="' + netdataDashboard.contextDecimalDigits(chart.context, -1) + '"'
+ chartCommonMin(chart.family, chart.context, chart.units)
+ chartCommonMax(chart.family, chart.context, chart.units)
+ ' role="application"></div>';
@@ -2961,7 +2969,7 @@
});
NETDATA.requiredJs.push({
- url: NETDATA.serverDefault + 'dashboard_info.js?v20170530-1',
+ url: NETDATA.serverDefault + 'dashboard_info.js?v20170916-1',
async: false,
isAlreadyLoaded: function() { return false; }
});
@@ -3589,4 +3597,4 @@
</div>
</body>
</html>
-<script type="text/javascript" src="dashboard.js?v20170715-1"></script>
+<script type="text/javascript" src="dashboard.js?v20170815-14"></script>
diff --git a/web/registry.html b/web/registry.html
index f40f5f2af..16a9db9e6 100644
--- a/web/registry.html
+++ b/web/registry.html
@@ -169,7 +169,7 @@ var netdataRegistryCallback = function(machines_array) {
and that you have chown it to be owned by netdata:netdata
-->
<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
-<script type="text/javascript" src="dashboard.js?v20170105-7"></script>
+<script type="text/javascript" src="dashboard.js?v20170724-7"></script>
<script>
// Set options for TV operation
diff --git a/web/tv.html b/web/tv.html
index 04cc01ccb..3e2241311 100644
--- a/web/tv.html
+++ b/web/tv.html
@@ -50,7 +50,7 @@ var netdataTheme = 'slate'; // this is dark
and that you have chown it to be owned by netdata:netdata
-->
<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
-<script type="text/javascript" src="dashboard.js?v20170105-7"></script>
+<script type="text/javascript" src="dashboard.js?v20170724-7"></script>
<script>
// Set options for TV operation
diff --git a/web/version.txt b/web/version.txt
index 4c72a5b03..4d343be3a 100644
--- a/web/version.txt
+++ b/web/version.txt
@@ -1 +1 @@
-4016e2d9e3c2fcf5f6d59827bf5f81083d6645ba
+89ed309252981ddd50f697fde4fe93019cb3e652